
/*--------------------------------------------------------------------*/
/*--- The leak checker.                             mc_leakcheck.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2017 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_vki.h"
#include "pub_tool_aspacehl.h"
#include "pub_tool_aspacemgr.h"
#include "pub_tool_execontext.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_libcsignal.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_oset.h"
#include "pub_tool_poolalloc.h"
#include "pub_tool_signals.h"       // Needed for mc_include.h
#include "pub_tool_libcsetjmp.h"    // setjmp facilities
#include "pub_tool_tooliface.h"     // Needed for mc_include.h
#include "pub_tool_xarray.h"
#include "pub_tool_xtree.h"

#include "mc_include.h"

/*------------------------------------------------------------*/
/*--- An overview of leak checking.                        ---*/
/*------------------------------------------------------------*/

// Leak-checking is a directed-graph traversal problem.  The graph has
// two kinds of nodes:
// - root-set nodes:
//   - GP registers of all threads;
//   - valid, aligned, pointer-sized data words in valid client memory,
//     including stacks, but excluding words within client heap-allocated
//     blocks (they are excluded so that later on we can differentiate
//     between heap blocks that are indirectly leaked vs. directly leaked).
// - heap-allocated blocks.  A block is a mempool chunk or a malloc chunk
//   that doesn't contain a mempool chunk.  Nb: the terms "blocks" and
//   "chunks" are used interchangeably below.
//
// There are two kinds of edges:
// - start-pointers, i.e. pointers to the start of a block;
// - interior-pointers, i.e. pointers to the interior of a block.
//
// We use "pointers" rather than "edges" below.
//
// Root set nodes only point to blocks.  Blocks only point to blocks;
// a block can point to itself.
//
// The aim is to traverse the graph and determine the status of each block.
//
// There are 9 distinct cases.  See memcheck/docs/mc-manual.xml for details.
// Presenting all nine categories to the user is probably too much.
// Currently we do this:
// - definitely lost:  case 3
// - indirectly lost:  cases 4, 9
// - possibly lost:    cases 5..8
// - still reachable:  cases 1, 2
//
// It's far from clear that this is the best possible categorisation;  it's
// accreted over time without any central guiding principle.

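// As a concrete illustration of these categories (a sketch only; the
// loss records quoted further below come from the leak-cases.c test,
// which exercises the numbered cases):
//
//    #include <stdlib.h>
//    typedef struct Node { struct Node* child; } Node;
//    static Node* keep;                       // root-set pointer
//    int main(void)
//    {
//       Node* b  = malloc(sizeof(Node));      // becomes definitely lost
//       b->child = malloc(sizeof(Node));      // becomes indirectly lost
//       keep     = malloc(sizeof(Node));      // still reachable at exit
//       b = NULL;    // drop the only start-pointer to the first block,
//                    // and with it the only path to its child
//       return 0;
//    }
//
// At exit, the first block has no pointers to it at all (definitely
// lost), the second is reachable only through a lost block (indirectly
// lost), and the third is still pointed to by the root-set word 'keep'.
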
/*------------------------------------------------------------*/
/*--- XXX: Thoughts for improvement.                       ---*/
/*------------------------------------------------------------*/

// From the user's point of view:
// - If they aren't using interior-pointers, they just have to fix the
//   directly lost blocks, and the indirectly lost ones will be fixed as
//   part of that.  Any possibly lost blocks will just be due to random
//   pointer garbage and can be ignored.
//
// - If they are using interior-pointers, the fact that they currently are not
//   being told which ones might be directly lost vs. indirectly lost makes
//   it hard to know where to begin.
//
// All this makes me wonder if a new option is warranted:
// --follow-interior-pointers.  By default it would be off, the leak checker
// wouldn't follow interior-pointers and there would only be 3 categories:
// R, DL, IL.
//
// If turned on, then it would show 7 categories (R, DL, IL, DR/DL, IR/IL,
// IR/IL/DL, IL/DL).  That output is harder to understand but it's your own
// damn fault for using interior-pointers...
//
// ----
//
// Also, why are two blank lines printed between each loss record?
// [bug 197930]
//
// ----
//
// Also, --show-reachable is a bad name because it also turns on the showing
// of indirectly leaked blocks(!)  It would be better named --show-all or
// --show-all-heap-blocks, because that's the end result.
// We now have the option --show-leak-kinds=... which allows one to
// specify =all.
//
// ----
//
// Also, the VALGRIND_LEAK_CHECK and VALGRIND_QUICK_LEAK_CHECK aren't great
// names.  VALGRIND_FULL_LEAK_CHECK and VALGRIND_SUMMARY_LEAK_CHECK would be
// better.
//
// ----
//
// Also, VALGRIND_COUNT_LEAKS and VALGRIND_COUNT_LEAK_BLOCKS aren't great as
// they combine direct leaks and indirect leaks into one.  New, more precise
// ones (they'll need new names) would be good.  If more categories are
// used, as per the --follow-interior-pointers option, they should be
// updated accordingly.  And they should use a struct to return the values.
//
// ----
//
// Also, for this case:
//
//  (4)  p4      BBB ---> AAA
//
// BBB is definitely directly lost.  AAA is definitely indirectly lost.
// Here are the relevant loss records printed for a full check (each block is
// 16 bytes):
//
// ==20397== 16 bytes in 1 blocks are indirectly lost in loss record 9 of 15
// ==20397==    at 0x4C2694E: malloc (vg_replace_malloc.c:177)
// ==20397==    by 0x400521: mk (leak-cases.c:49)
// ==20397==    by 0x400578: main (leak-cases.c:72)
//
// ==20397== 32 (16 direct, 16 indirect) bytes in 1 blocks are definitely
// lost in loss record 14 of 15
// ==20397==    at 0x4C2694E: malloc (vg_replace_malloc.c:177)
// ==20397==    by 0x400521: mk (leak-cases.c:49)
// ==20397==    by 0x400580: main (leak-cases.c:72)
//
// The first one is fine -- it describes AAA.
//
// The second one is for BBB.  It's correct in that 16 bytes in 1 block are
// directly lost.  It's also correct that 16 are indirectly lost as a result,
// but it means that AAA is being counted twice in the loss records.  (It's
// not, thankfully, counted twice in the summary counts.)  Argh.
//
// This would be less confusing for the second one:
//
// ==20397== 16 bytes in 1 blocks are definitely lost in loss record 14
// of 15 (and 16 bytes in 1 block are indirectly lost as a result;  they
// are mentioned elsewhere (if --show-reachable=yes or indirect is given
// in --show-leak-kinds=... !))
// ==20397==    at 0x4C2694E: malloc (vg_replace_malloc.c:177)
// ==20397==    by 0x400521: mk (leak-cases.c:49)
// ==20397==    by 0x400580: main (leak-cases.c:72)
//
// But ideally we'd present the loss record for the directly lost block and
// then the resultant indirectly lost blocks and make the dependence clear.
// Double argh.

/*------------------------------------------------------------*/
/*--- The actual algorithm.                                ---*/
/*------------------------------------------------------------*/

// - Find all the blocks (a.k.a. chunks) to check.  Mempool chunks require
//   some special treatment because they can be within malloc'd blocks.
// - Scan every word in the root set (GP registers and valid
//   non-heap memory words).
//   - First, we skip if it doesn't point to valid memory.
//   - Then, we see if it points to the start or interior of a block.  If
//     so, we push the block onto the mark stack and mark it as having been
//     reached.
// - Then, we process the mark stack, repeating the scanning for each block;
//   this can push more blocks onto the mark stack.  We repeat until the
//   mark stack is empty.  Each block is marked as definitely or possibly
//   reachable, depending on whether interior-pointers were required to
//   reach it.
// - At this point we know for every block if it's reachable or not.
// - We then push each unreached block onto the mark stack, using the block
//   number as the "clique" number.
// - We process the mark stack again, this time grouping blocks into cliques
//   in order to facilitate the directly/indirectly lost categorisation.
// - We group blocks by their ExeContexts and categorisation, and print them
//   if --leak-check=full.  We also print summary numbers.
//
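// Schematically, the two traversal passes look like this (an illustrative
// sketch only, written in terms of the helper functions defined below;
// the real driver is MC_(detect_memory_leaks)):
//
//    // Pass 1: reachability from the root set.
//    for each root-set word w:
//       lc_push_without_clique_if_a_chunk_ptr(w, /*is_prior_definite*/True);
//    lc_process_markstack(/*clique*/-1);
//
//    // Pass 2: group the still-Unreached blocks into cliques.
//    for (i = 0; i < lc_n_chunks; i++)
//       if (lc_extras[i].state == Unreached) {
//          lc_push(i, lc_chunks[i]);
//          lc_process_markstack(/*clique*/i);
//       }
//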
// A note on "cliques":
// - A directly lost block is one with no pointers to it.  An indirectly
//   lost block is one that is pointed to by a directly or indirectly lost
//   block.
// - Each directly lost block has zero or more indirectly lost blocks
//   hanging off it.  All these blocks together form a "clique".  The
//   directly lost block is called the "clique leader".  The clique number
//   is the number (in lc_chunks[]) of the clique leader.
// - Actually, a directly lost block may be pointed to if it's part of a
//   cycle.  In that case, there may be more than one choice for the clique
//   leader, and the choice is arbitrary.  Eg. if you have A-->B and B-->A
//   either A or B could be the clique leader.
// - Cliques cannot overlap, and will be truncated to avoid this.  Eg. if we
//   have A-->C and B-->C, the two cliques will be {A,C} and {B}, or {A} and
//   {B,C} (again the choice is arbitrary).  This is because we don't want
//   to count a block as indirectly lost more than once.
//
// A note on 'is_prior_definite':
// - This is a boolean used in various places that indicates if the chain
//   up to the prior node (prior to the one being considered) is definite.
// - In the clique == -1 case:
//   - if True it means that the prior node is a root-set node, or that the
//     prior node is a block which is reachable from the root-set via
//     start-pointers.
//   - if False it means that the prior node is a block that is only
//     reachable from the root-set via a path including at least one
//     interior-pointer.
// - In the clique != -1 case, currently it's always True because we treat
//   start-pointers and interior-pointers the same for direct/indirect leak
//   checking.  If we added a PossibleIndirectLeak state then this would
//   change.


// Define to debug the memory-leak-detector.
#define VG_DEBUG_FIND_CHUNK 0
#define VG_DEBUG_LEAKCHECK 0
#define VG_DEBUG_CLIQUE 0


/*------------------------------------------------------------*/
/*--- Getting the initial chunks, and searching them.      ---*/
/*------------------------------------------------------------*/

// Compare the MC_Chunks by 'data' (i.e. the address of the block).
static Int compare_MC_Chunks(const void* n1, const void* n2)
{
   const MC_Chunk* mc1 = *(const MC_Chunk *const *)n1;
   const MC_Chunk* mc2 = *(const MC_Chunk *const *)n2;
   if (mc1->data < mc2->data) return -1;
   if (mc1->data > mc2->data) return  1;
   return 0;
}

#if VG_DEBUG_FIND_CHUNK
// Used to sanity-check the fast binary-search mechanism.
static
Int find_chunk_for_OLD ( Addr       ptr,
                         MC_Chunk** chunks,
                         Int        n_chunks )

{
   Int  i;
   Addr a_lo, a_hi;
   PROF_EVENT(MCPE_FIND_CHUNK_FOR_OLD);
   for (i = 0; i < n_chunks; i++) {
      PROF_EVENT(MCPE_FIND_CHUNK_FOR_OLD_LOOP);
      a_lo = chunks[i]->data;
      a_hi = ((Addr)chunks[i]->data) + chunks[i]->szB;
      if (a_lo == a_hi)
         a_hi++; // Special case for szB 0.  See find_chunk_for.
      if (a_lo <= ptr && ptr < a_hi)
         return i;
   }
   return -1;
}
#endif

// Find the i such that ptr points at or inside the block described by
// chunks[i].  Return -1 if none found.  This assumes that chunks[]
// has been sorted on the 'data' field.
static
Int find_chunk_for ( Addr       ptr,
                     MC_Chunk** chunks,
                     Int        n_chunks )
{
   Addr a_mid_lo, a_mid_hi;
   Int lo, mid, hi, retVal;
   // VG_(printf)("find chunk for %p = ", ptr);
   retVal = -1;
   lo = 0;
   hi = n_chunks-1;
   while (True) {
      // Invariant: current unsearched space is from lo to hi, inclusive.
      if (lo > hi) break; // not found

      mid      = (lo + hi) / 2;
      a_mid_lo = chunks[mid]->data;
      a_mid_hi = chunks[mid]->data + chunks[mid]->szB;
      // Extent of block 'mid' is [a_mid_lo .. a_mid_hi).
      // Special-case zero-sized blocks - treat them as if they had
      // size 1.  Not doing so causes them to not cover any address
      // range at all and so will never be identified as the target of
      // any pointer, which causes them to be incorrectly reported as
      // definitely leaked.
      if (chunks[mid]->szB == 0)
         a_mid_hi++;

      if (ptr < a_mid_lo) {
         hi = mid-1;
         continue;
      }
      if (ptr >= a_mid_hi) {
         lo = mid+1;
         continue;
      }
      tl_assert(ptr >= a_mid_lo && ptr < a_mid_hi);
      retVal = mid;
      break;
   }

#  if VG_DEBUG_FIND_CHUNK
   tl_assert(retVal == find_chunk_for_OLD ( ptr, chunks, n_chunks ));
#  endif
   // VG_(printf)("%d\n", retVal);
   return retVal;
}


static MC_Chunk**
find_active_chunks(Int* pn_chunks)
{
   // Our goal is to construct a set of chunks that includes every
   // mempool chunk, and every malloc region that *doesn't* contain a
   // mempool chunk.
   MC_Mempool *mp;
   MC_Chunk **mallocs, **chunks, *mc;
   UInt n_mallocs, n_chunks, m, s;
   Bool *malloc_chunk_holds_a_pool_chunk;

   // First we collect all the malloc chunks into an array and sort it.
   // We do this because we want to query the chunks by interior
   // pointers, requiring binary search.
   mallocs = (MC_Chunk**) VG_(HT_to_array)( MC_(malloc_list), &n_mallocs );
   if (n_mallocs == 0) {
      tl_assert(mallocs == NULL);
      *pn_chunks = 0;
      return NULL;
   }
   VG_(ssort)(mallocs, n_mallocs, sizeof(VgHashNode*), compare_MC_Chunks);

   // Then we build an array containing a Bool for each malloc chunk,
   // indicating whether it contains any mempools.
   malloc_chunk_holds_a_pool_chunk = VG_(calloc)( "mc.fas.1",
                                                  n_mallocs, sizeof(Bool) );
   n_chunks = n_mallocs;

   // Then we loop over the mempool tables.  For each chunk in each
   // pool, we set the entry in the Bool array corresponding to the
   // malloc chunk containing the mempool chunk.
   VG_(HT_ResetIter)(MC_(mempool_list));
   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
      VG_(HT_ResetIter)(mp->chunks);
      while ( (mc = VG_(HT_Next)(mp->chunks)) ) {

         // We'll need to record this chunk.
         n_chunks++;

         // Possibly invalidate the malloc holding the beginning of this chunk.
         m = find_chunk_for(mc->data, mallocs, n_mallocs);
         if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
            tl_assert(n_chunks > 0);
            n_chunks--;
            malloc_chunk_holds_a_pool_chunk[m] = True;
         }

         // Possibly invalidate the malloc holding the end of this chunk.
         if (mc->szB > 1) {
            m = find_chunk_for(mc->data + (mc->szB - 1), mallocs, n_mallocs);
            if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
               tl_assert(n_chunks > 0);
               n_chunks--;
               malloc_chunk_holds_a_pool_chunk[m] = True;
            }
         }
      }
   }
   tl_assert(n_chunks > 0);

   // Create final chunk array.
   chunks = VG_(malloc)("mc.fas.2", sizeof(VgHashNode*) * (n_chunks));
   s = 0;

   // Copy the mempool chunks and the non-marked malloc chunks into a
   // combined array of chunks.
   VG_(HT_ResetIter)(MC_(mempool_list));
   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
      VG_(HT_ResetIter)(mp->chunks);
      while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
         tl_assert(s < n_chunks);
         chunks[s++] = mc;
      }
   }
   for (m = 0; m < n_mallocs; ++m) {
      if (!malloc_chunk_holds_a_pool_chunk[m]) {
         tl_assert(s < n_chunks);
         chunks[s++] = mallocs[m];
      }
   }
   tl_assert(s == n_chunks);

   // Free temporaries.
   VG_(free)(mallocs);
   VG_(free)(malloc_chunk_holds_a_pool_chunk);

   *pn_chunks = n_chunks;

   return chunks;
}

/*------------------------------------------------------------*/
/*--- The leak detector proper.                            ---*/
/*------------------------------------------------------------*/

// Holds extra info about each block during leak checking.
typedef
   struct {
      UInt  state:2;    // Reachedness.
      UInt  pending:1;  // Scan pending.
      UInt  heuristic: (sizeof(UInt)*8)-3;
      // Heuristic with which this block was considered reachable.
      // LchNone if state != Reachable or no heuristic was needed to
      // consider it reachable.

      union {
         SizeT indirect_szB;
         // If Unreached, how many bytes are unreachable from here.
         SizeT clique;
         // If IndirectLeak, the clique leader to which it belongs.
      } IorC;
   }
   LC_Extra;

// An array holding pointers to every chunk we're checking.  Sorted by address.
// lc_chunks is initialised during leak search.  It is kept after leak search
// to support printing the list of blocks belonging to a loss record.
// The lc_chunks array can only be used validly until the next "free"
// operation (as a free operation potentially destroys one or more chunks).
// To detect whether lc_chunks is valid, we store the number of free
// operations done when lc_chunks was built: lc_chunks (and lc_extras) stay
// valid as long as no free operation has been done since they were built.
static MC_Chunk** lc_chunks;
// How many chunks we're dealing with.
static Int        lc_n_chunks;
static SizeT lc_chunks_n_frees_marker;
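
// Staleness-check sketch (illustrative; 'MC_(get_cntr_freed_mem)' is a
// hypothetical name standing in for an accessor that returns the number
// of free operations done so far):
//
//    if (lc_chunks == NULL
//        || lc_chunks_n_frees_marker != MC_(get_cntr_freed_mem)())
//       /* lc_chunks is stale: a new leak search must rebuild it
//          before block lists can be printed. */
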
// This has the same number of entries as lc_chunks, and each entry
// in lc_chunks corresponds with the entry here (ie. lc_chunks[i] and
// lc_extras[i] describe the same block).
static LC_Extra* lc_extras;

// Chunks are converted into and merged with loss records, maintained in
// lr_table.  lr_table elements are kept from one leak search to another to
// implement the "print new/changed leaks" client request.
static OSet*        lr_table;
// Array of sorted loss records (produced during the last leak search).
static LossRecord** lr_array;

// Value of the heuristics parameter used in the current (or last) leak check.
static UInt detect_memory_leaks_last_heuristics;

// DeltaMode used the last time we called detect_memory_leaks.
// The recorded leak errors are output using a logic based on this delta_mode.
// The below avoids replicating the delta_mode in each LossRecord.
LeakCheckDeltaMode MC_(detect_memory_leaks_last_delta_mode);

// Each leak search run increments the below generation counter.
// A suppression used during a leak search will contain this
// generation number.
UInt MC_(leak_search_gen);

// Records chunks that are currently being processed.  Each element in the
// stack is an index into lc_chunks and lc_extras.  Its size is
// 'lc_n_chunks' because in the worst case that's how many chunks could be
// pushed onto it (actually I think the maximum is lc_n_chunks-1 but let's
// be conservative).
static Int* lc_markstack;
// The index of the top element of the stack; -1 if the stack is empty, 0 if
// the stack has one element, 1 if it has two, etc.
static Int  lc_markstack_top;

// Keeps track of how many bytes of memory we've scanned, for printing.
// (Nb: We don't keep track of how many register bytes we've scanned.)
static SizeT lc_scanned_szB;
// Keeps track of how many bytes we have not scanned due to read errors that
// caused a signal such as SIGSEGV.
static SizeT lc_sig_skipped_szB;


SizeT MC_(bytes_leaked)     = 0;
SizeT MC_(bytes_indirect)   = 0;
SizeT MC_(bytes_dubious)    = 0;
SizeT MC_(bytes_reachable)  = 0;
SizeT MC_(bytes_suppressed) = 0;

SizeT MC_(blocks_leaked)     = 0;
SizeT MC_(blocks_indirect)   = 0;
SizeT MC_(blocks_dubious)    = 0;
SizeT MC_(blocks_reachable)  = 0;
SizeT MC_(blocks_suppressed) = 0;

// Subset of MC_(bytes_reachable) and MC_(blocks_reachable) which
// are considered reachable due to the corresponding heuristic.
static SizeT MC_(bytes_heuristically_reachable)[N_LEAK_CHECK_HEURISTICS]
                                               = {0,0,0,0};
static SizeT MC_(blocks_heuristically_reachable)[N_LEAK_CHECK_HEURISTICS]
                                                = {0,0,0,0};

// Determines if a pointer is to a chunk.  Returns the chunk number et al
// via call-by-reference.
static Bool
lc_is_a_chunk_ptr(Addr ptr, Int* pch_no, MC_Chunk** pch, LC_Extra** pex)
{
   Int ch_no;
   MC_Chunk* ch;
   LC_Extra* ex;

   // Quick filter.  Note: implemented with am, not with get_vabits2
   // as ptr might be random data pointing anywhere.  On 64 bit
   // platforms, getting va bits for random data can be quite costly
   // due to the secondary map.
   if (!VG_(am_is_valid_for_client)(ptr, 1, VKI_PROT_READ)) {
      return False;
   } else {
      ch_no = find_chunk_for(ptr, lc_chunks, lc_n_chunks);
      tl_assert(ch_no >= -1 && ch_no < lc_n_chunks);

      if (ch_no == -1) {
         return False;
      } else {
         // Ok, we've found a pointer to a chunk.  Get the MC_Chunk and its
         // LC_Extra.
         ch = lc_chunks[ch_no];
         ex = &(lc_extras[ch_no]);

         tl_assert(ptr >= ch->data);
         tl_assert(ptr < ch->data + ch->szB + (ch->szB==0 ? 1 : 0));

         if (VG_DEBUG_LEAKCHECK)
            VG_(printf)("ptr=%#lx -> block %d\n", ptr, ch_no);

         *pch_no = ch_no;
         *pch    = ch;
         *pex    = ex;

         return True;
      }
   }
}

// Push a chunk (well, just its index) onto the mark stack.
static void lc_push(Int ch_no, MC_Chunk* ch)
{
   if (!lc_extras[ch_no].pending) {
      if (0) {
         VG_(printf)("pushing %#lx-%#lx\n", ch->data, ch->data + ch->szB);
      }
      lc_markstack_top++;
      tl_assert(lc_markstack_top < lc_n_chunks);
      lc_markstack[lc_markstack_top] = ch_no;
      tl_assert(!lc_extras[ch_no].pending);
      lc_extras[ch_no].pending = True;
   }
}

// Pop the index of the chunk on the top of the mark stack into *ret.
// Returns False if the stack is empty, True otherwise.
static Bool lc_pop(Int* ret)
{
   if (-1 == lc_markstack_top) {
      return False;
   } else {
      tl_assert(0 <= lc_markstack_top && lc_markstack_top < lc_n_chunks);
      *ret = lc_markstack[lc_markstack_top];
      lc_markstack_top--;
      tl_assert(lc_extras[*ret].pending);
      lc_extras[*ret].pending = False;
      return True;
   }
}

static const HChar* pp_heuristic(LeakCheckHeuristic h)
{
   switch(h) {
   case LchNone:                return "none";
   case LchStdString:           return "stdstring";
   case LchLength64:            return "length64";
   case LchNewArray:            return "newarray";
   case LchMultipleInheritance: return "multipleinheritance";
   default:                     return "???invalid heuristic???";
   }
}

// True if ptr looks like the address of a vtable, i.e. if ptr
// points to an array of pointers to functions.
// It is assumed the only caller of this function is heuristic_reachedness,
// which must check that ptr is aligned and above page 0.
// Checking that ptr is above page 0 is an optimisation: it is assumed
// that no vtable is located in page 0, so all small integer values
// encountered during the scan will not incur the cost of calling this
// function.
static Bool aligned_ptr_above_page0_is_vtable_addr(Addr ptr)
{
   // ??? If performance problem:
   // ??? maybe implement a cache (array indexed by ptr % primenr)
   // ??? of "I am a vtable ptr" ???

   // ??? Maybe the debug info could (efficiently?) be used to detect vtables ?

   // We consider ptr as a vtable ptr if it points to a table
   // where we find only NULL pointers or pointers pointing at an
   // executable region.  We must find at least 2 non-NULL pointers
   // before considering ptr as a vtable pointer.
   // We scan a maximum of VTABLE_MAX_CHECK words for these 2 non-NULL
   // pointers.
#define VTABLE_MAX_CHECK 20

   NSegment const *seg;
   UInt nr_fn_ptrs = 0;
   Addr scan;
   Addr scan_max;

   // First verify ptr points inside a client mapped file section.
   // ??? is a vtable always in a file mapped readable section ?
   seg = VG_(am_find_nsegment) (ptr);
   if (seg == NULL
       || seg->kind != SkFileC
       || !seg->hasR)
      return False;

   // Check potential function pointers, up to a maximum of VTABLE_MAX_CHECK.
   scan_max = ptr + VTABLE_MAX_CHECK*sizeof(Addr);
   // If ptr is near the end of seg, avoid scan_max exceeding the end of seg:
   if (scan_max > seg->end - sizeof(Addr))
      scan_max = seg->end - sizeof(Addr);
   for (scan = ptr; scan <= scan_max; scan+=sizeof(Addr)) {
      Addr pot_fn = *((Addr *)scan);
      if (pot_fn == 0)
         continue; // NULL fn pointer.  Seems it can happen in vtables.
      seg = VG_(am_find_nsegment) (pot_fn);
#if defined(VGA_ppc64be)
      // ppc64BE uses a thunk table (function descriptors), so we have one
      // more level of indirection to follow.
      if (seg == NULL
          || seg->kind != SkFileC
          || !seg->hasR
          || !seg->hasW)
         return False; // ptr to nowhere, or not a ptr to thunks.
      pot_fn = *((Addr *)pot_fn);
      if (pot_fn == 0)
         continue; // NULL fn pointer.  Seems it can happen in vtables.
      seg = VG_(am_find_nsegment) (pot_fn);
#endif
      if (seg == NULL
          || seg->kind != SkFileC
          || !seg->hasT)
         return False; // ptr to nowhere, or not a fn ptr.
      nr_fn_ptrs++;
      if (nr_fn_ptrs == 2)
         return True;
   }

   return False;
}

// True if a is properly aligned and points to 64 bits of valid memory.
static Bool is_valid_aligned_ULong ( Addr a )
{
   if (sizeof(Word) == 8)
      return MC_(is_valid_aligned_word)(a);

   return MC_(is_valid_aligned_word)(a)
      && MC_(is_valid_aligned_word)(a + 4);
}

/* The below leak_search_fault_catcher is used to catch memory access
   errors happening during leak_search.  During the scan, we check
   with aspacemgr and/or VA bits that each page or dereferenced location is
   readable and belongs to the client.  However, we still protect
   against SIGSEGV and SIGBUS, e.g. in case aspacemgr is desynchronised
   with the real page mappings.  Such a desynchronisation could happen
   due to an aspacemgr bug.  Note that if the application is using
   mprotect(NONE), then a page can be unreadable but have addressable
   and defined VA bits (see mc_main.c function mc_new_mem_mprotect).
   Currently, 2 functions dereference client memory during leak search:
   heuristic_reachedness and lc_scan_memory.
   Each such function has its own fault catcher, which calls
   leak_search_fault_catcher with the proper 'who' and jmpbuf parameters. */
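/* Sketch of the protected-dereference pattern both of those functions
   follow (illustrative only, expressed with the facilities used below;
   'my_fault_catcher' and 'my_jmpbuf' stand for the per-caller instances):

      fault_catcher_t prev_catcher
         = VG_(set_fault_catcher)(my_fault_catcher);
      if (VG_MINIMAL_SETJMP(my_jmpbuf) != 0) {
         // longjmp-ed here from the catcher: bad_scanned_addr records
         // the faulting address; skip past it and carry on.
      } else {
         Addr value = *(Addr*)ptr;  // the dereference that may fault
      }
      VG_(set_fault_catcher)(prev_catcher);
*/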
static volatile Addr bad_scanned_addr;
static
void leak_search_fault_catcher ( Int sigNo, Addr addr,
                                 const HChar *who, VG_MINIMAL_JMP_BUF(jmpbuf) )
{
   vki_sigset_t sigmask;

   if (0)
      VG_(printf)("OUCH! sig=%d addr=%#lx who=%s\n", sigNo, addr, who);

   /* The signal handler runs with the signal masked.
      Unmask the handled signal before longjmp-ing or return-ing.
      Note that during leak search, we expect only SIGSEGV or SIGBUS
      and we do not expect another occurrence until we have longjmp-ed or
      returned to resume the leak search.  So, it is safe to unmask the
      signal here. */
   /* First get current mask (by passing NULL as first arg) */
   VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &sigmask);
   /* Then set a new sigmask, with this signal removed from the mask. */
   VG_(sigdelset)(&sigmask, sigNo);
   VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);

   if (sigNo == VKI_SIGSEGV || sigNo == VKI_SIGBUS) {
      bad_scanned_addr = addr;
      VG_MINIMAL_LONGJMP(jmpbuf);
   } else {
      /* ??? During leak search, we are not supposed to receive any
         sync signal other than these 2.
         In theory, we should not call VG_(umsg) in a signal handler,
         but better (try to) report this unexpected behaviour. */
      VG_(umsg)("leak_search_fault_catcher:"
                " unexpected signal %d, catcher %s ???\n",
                sigNo, who);
   }
}

// jmpbuf and fault_catcher used during heuristic_reachedness
static VG_MINIMAL_JMP_BUF(heuristic_reachedness_jmpbuf);
static
void heuristic_reachedness_fault_catcher ( Int sigNo, Addr addr )
{
   leak_search_fault_catcher (sigNo, addr,
                              "heuristic_reachedness_fault_catcher",
                              heuristic_reachedness_jmpbuf);
}

// If ch is heuristically reachable via a heuristic member of heur_set,
// returns this heuristic.
// If ch cannot be considered reachable using one of these heuristics,
// returns LchNone.
// This should only be called when ptr is an interior ptr to ch.
// The StdString/NewArray/MultipleInheritance heuristics are directly
// inspired by DrMemory:
// see http://www.burningcutlery.com/derek/docs/drmem-CGO11.pdf [section VI,C]
// and bug 280271.
static LeakCheckHeuristic heuristic_reachedness (Addr ptr,
                                                 MC_Chunk *ch, LC_Extra *ex,
                                                 UInt heur_set)
{

   fault_catcher_t prev_catcher;

   prev_catcher = VG_(set_fault_catcher)(heuristic_reachedness_fault_catcher);

   // See leak_search_fault_catcher
   if (VG_MINIMAL_SETJMP(heuristic_reachedness_jmpbuf) != 0) {
      VG_(set_fault_catcher) (prev_catcher);
      return LchNone;
   }

   if (HiS(LchStdString, heur_set)) {
      // Detects inner pointers to a std::string, for the layout
      //     length capacity refcount char_array[] \0
      // where ptr points to the beginning of the char_array.
      // Note: we check definedness for length and capacity but
      // not for refcount, as the refcount size might be smaller than
      // a SizeT, giving an uninitialised hole in the first 3 SizeTs.
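      // Illustrative memory picture assumed here (a sketch; it matches
      // e.g. libstdc++'s old reference-counted string representation):
      //
      //    ch->data: [ SizeT length ][ SizeT capacity ][ refcount ]
      //    ptr ----> [ char data[capacity] ... '\0' ]
      //
      // so a match requires ptr == ch->data + 3*sizeof(SizeT) and
      // ch->szB == 3*sizeof(SizeT) + capacity + 1.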
      if ( ptr == ch->data + 3 * sizeof(SizeT)
           && MC_(is_valid_aligned_word)(ch->data + sizeof(SizeT))) {
         const SizeT capacity = *((SizeT*)(ch->data + sizeof(SizeT)));
         if (3 * sizeof(SizeT) + capacity + 1 == ch->szB
            && MC_(is_valid_aligned_word)(ch->data)) {
            const SizeT length = *((SizeT*)ch->data);
            if (length <= capacity) {
               // ??? could check there is no null byte from ptr to ptr+length-1
               // ??? and that there is a null byte at ptr+length.
               // ???
               // ??? could check that ch->allockind is MC_AllocNew ???
               // ??? probably not a good idea, as I guess stdstring
               // ??? allocation can be done via a custom allocator
               // ??? or even a call to malloc ????
               VG_(set_fault_catcher) (prev_catcher);
               return LchStdString;
            }
         }
      }
   }

   if (HiS(LchLength64, heur_set)) {
      // Detects inner pointers that point at a 64-bit offset (8 bytes) into
      // a block whose first 8 bytes hold the remaining size
      // (= total block size - 8) as a 64-bit number.
      // This is used e.g. by sqlite for tracking the total size of allocated
      // memory.
      // Note that on 64-bit platforms, a block matching LchLength64 will
      // also be matched by LchNewArray.
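      // Allocation pattern matched, as a sketch (an assumption about the
      // allocator, modelled on sqlite-style size tracking):
      //
      //    void* p = malloc(sizeof(ULong) + n);
      //    *(ULong*)p = n;                        // remaining size
      //    client_ptr = (char*)p + sizeof(ULong); // what the client keeps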
      if ( ptr == ch->data + sizeof(ULong)
          && is_valid_aligned_ULong(ch->data)) {
         const ULong size = *((ULong*)ch->data);
         if (size > 0 && (ch->szB - sizeof(ULong)) == size) {
            VG_(set_fault_catcher) (prev_catcher);
            return LchLength64;
         }
      }
   }

   if (HiS(LchNewArray, heur_set)) {
      // Detects inner pointers at the second word of a new[] array, following
      // a plausible nr of elements.
      // Such inner pointers are used for arrays of elements
      // having a destructor, as the delete[] of the array must know
      // how many elements to destroy.
      //
      // We have a strange/wrong case for 'ptr = new MyClass[0];' :
      // For such a case, the returned ptr points just outside the
      // allocated chunk.  This chunk is then seen as a definite
      // leak by Valgrind, as it is not considered an interior pointer.
      // It is the C++ equivalent of bug 99923 (malloc(0) wrongly considered
      // as definitely leaked).  See the trick in find_chunk_for handling
      // 0-sized blocks.  This trick does not work for 'new MyClass[0]'
      // because a word-sized chunk is allocated to store the (0) nr
      // of elements.
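      // Layout matched, as a sketch (the exact scheme is
      // implementation-defined; this is the typical array-cookie
      // arrangement, e.g. in the Itanium C++ ABI):
      //
      //    ch->data: [ SizeT nr_elts ][ element 0 ][ element 1 ] ...
      //    ptr ---------------------->
      //
      // hence the check below that (ch->szB - sizeof(SizeT)) is an exact
      // multiple of nr_elts.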
      if ( ptr == ch->data + sizeof(SizeT)
           && MC_(is_valid_aligned_word)(ch->data)) {
         const SizeT nr_elts = *((SizeT*)ch->data);
         if (nr_elts > 0 && (ch->szB - sizeof(SizeT)) % nr_elts == 0) {
            // ??? could check that ch->allockind is MC_AllocNewVec ???
            VG_(set_fault_catcher) (prev_catcher);
            return LchNewArray;
         }
      }
   }

   if (HiS(LchMultipleInheritance, heur_set)) {
      // Detects an inner pointer used for multiple inheritance.
      // The assumption is that the vtable pointers are before the object.
      if (VG_IS_WORD_ALIGNED(ptr)
          && MC_(is_valid_aligned_word)(ptr)) {
         Addr first_addr;
         Addr inner_addr;

         // Avoid the call to is_vtable_addr when the addr is not
         // aligned or points into page 0, as it is unlikely that
         // a vtable is located in this page.  This last optimisation
         // avoids calling aligned_ptr_above_page0_is_vtable_addr
         // for all small integers.
         // Note: we could possibly also avoid calling this function
         // for small negative integers, as no vtable should be located
         // in the last page.
         inner_addr = *((Addr*)ptr);
         if (VG_IS_WORD_ALIGNED(inner_addr)
             && inner_addr >= (Addr)VKI_PAGE_SIZE
             && MC_(is_valid_aligned_word)(ch->data)) {
            first_addr = *((Addr*)ch->data);
            if (VG_IS_WORD_ALIGNED(first_addr)
                && first_addr >= (Addr)VKI_PAGE_SIZE
                && aligned_ptr_above_page0_is_vtable_addr(inner_addr)
                && aligned_ptr_above_page0_is_vtable_addr(first_addr)) {
               // ??? could check that ch->allockind is MC_AllocNew ???
               VG_(set_fault_catcher) (prev_catcher);
               return LchMultipleInheritance;
            }
         }
      }
   }

   VG_(set_fault_catcher) (prev_catcher);
   return LchNone;
}


// If 'ptr' is pointing to a heap-allocated block which hasn't been seen
// before, push it onto the mark stack.
static void
lc_push_without_clique_if_a_chunk_ptr(Addr ptr, Bool is_prior_definite)
{
   Int ch_no;
   MC_Chunk* ch;
   LC_Extra* ex;
   Reachedness ch_via_ptr; // Is ch reachable via ptr, and how?

   if ( ! lc_is_a_chunk_ptr(ptr, &ch_no, &ch, &ex) )
      return;

   if (ex->state == Reachable) {
      if (ex->heuristic && ptr == ch->data)
         // If the block was considered reachable via a heuristic, and it is
         // now directly reachable via ptr, clear the heuristic field.
         ex->heuristic = LchNone;
      return;
   }

   // Possibly upgrade the state, ie. one of:
   // - Unreached --> Possible
   // - Unreached --> Reachable
   // - Possible  --> Reachable

   if (ptr == ch->data)
      ch_via_ptr = Reachable;
   else if (detect_memory_leaks_last_heuristics) {
      ex->heuristic
         = heuristic_reachedness (ptr, ch, ex,
                                  detect_memory_leaks_last_heuristics);
      if (ex->heuristic)
         ch_via_ptr = Reachable;
      else
         ch_via_ptr = Possible;
   } else
      ch_via_ptr = Possible;

   if (ch_via_ptr == Reachable && is_prior_definite) {
      // 'ptr' points to the start of the block or is to be considered as
      // pointing to the start of the block, and the prior node is
      // definite, which means that this block is definitely reachable.
      ex->state = Reachable;

      // State has changed to Reachable so (re)scan the block to make
      // sure any blocks it points to are correctly marked.
      lc_push(ch_no, ch);

   } else if (ex->state == Unreached) {
      // Either 'ptr' is an interior-pointer, or the prior node isn't definite,
      // which means that we can only mark this block as possibly reachable.
      ex->state = Possible;

      // State has changed to Possible so (re)scan the block to make
      // sure any blocks it points to are correctly marked.
      lc_push(ch_no, ch);
   }
}

static void
lc_push_if_a_chunk_ptr_register(ThreadId tid, const HChar* regname, Addr ptr)
{
   lc_push_without_clique_if_a_chunk_ptr(ptr, /*is_prior_definite*/True);
}

// If ptr is pointing to a heap-allocated block which hasn't been seen
// before, push it onto the mark stack.  Clique is the index of the
// clique leader.
static void
lc_push_with_clique_if_a_chunk_ptr(Addr ptr, Int clique, Int cur_clique)
{
   Int ch_no;
   MC_Chunk* ch;
   LC_Extra* ex;

   tl_assert(0 <= clique && clique < lc_n_chunks);

   if ( ! lc_is_a_chunk_ptr(ptr, &ch_no, &ch, &ex) )
      return;

   // If it's not Unreached, it's already been handled so ignore it.
   // If ch_no==clique, it's the clique leader, which means this is a cyclic
   // structure;  again ignore it because it's already been handled.
   if (ex->state == Unreached && ch_no != clique) {
      // Note that, unlike reachable blocks, we currently don't distinguish
      // between start-pointers and interior-pointers here.  We probably
      // should, though.
      lc_push(ch_no, ch);

      // Add the block to the clique, and add its size to the
      // clique-leader's indirect size.  Also, if the new block was
      // itself a clique leader, it isn't any more, so add its
      // indirect_szB to the new clique leader.
      if (VG_DEBUG_CLIQUE) {
         if (ex->IorC.indirect_szB > 0)
            VG_(printf)("  clique %d joining clique %d adding %lu+%lu\n",
                        ch_no, clique, (SizeT)ch->szB, ex->IorC.indirect_szB);
         else
            VG_(printf)("  block %d joining clique %d adding %lu\n",
                        ch_no, clique, (SizeT)ch->szB);
      }

      lc_extras[clique].IorC.indirect_szB += ch->szB;
      lc_extras[clique].IorC.indirect_szB += ex->IorC.indirect_szB;
      ex->state = IndirectLeak;
      ex->IorC.clique = (SizeT) cur_clique;
   }
}

static void
lc_push_if_a_chunk_ptr(Addr ptr,
                       Int clique, Int cur_clique, Bool is_prior_definite)
{
   if (-1 == clique)
      lc_push_without_clique_if_a_chunk_ptr(ptr, is_prior_definite);
   else
      lc_push_with_clique_if_a_chunk_ptr(ptr, clique, cur_clique);
}


static VG_MINIMAL_JMP_BUF(lc_scan_memory_jmpbuf);
static
void lc_scan_memory_fault_catcher ( Int sigNo, Addr addr )
{
   leak_search_fault_catcher (sigNo, addr,
                              "lc_scan_memory_fault_catcher",
                              lc_scan_memory_jmpbuf);
}

// lc_scan_memory has 2 modes:
//
// 1. Leak check mode (searched == 0).
// -----------------------------------
// Scan a block of memory between [start, start+len).  This range may
// be bogus, inaccessible, or otherwise strange; we deal with it.  For each
// valid aligned word we assume it's a pointer to a chunk and push the chunk
// onto the mark stack if so.
// clique is the "highest level clique" in which indirectly leaked blocks have
// to be collected.  cur_clique is the current "lower" level clique through
// which the memory to be scanned has been found.
// Example: in the below tree, if A is leaked, the top level clique will
// be A, while the lower level cliques will be B and C.
/*
           A
         /   \
        B     C
       / \   / \
      D   E F   G
*/
// Proper handling of the top and lowest level cliques allows the block_list
// of a loss record to describe the hierarchy of indirectly leaked blocks.
//
// 2. Search ptr mode (searched != 0).
// -----------------------------------
// In this mode, the function searches for pointers to a specific address
// range.  In such a case, lc_scan_memory just scans [start..start+len[ for
// pointers to searched and outputs the places where searched is found.
// It does not recursively scan the found memory.
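// (Search mode backs memcheck's gdbserver monitor command, along the
//  lines of:
//     (gdb) monitor who_points_at 0x4028028 16
//  -- shown here as illustrative context; the command dispatching lives
//  elsewhere in memcheck.)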
njn8225cc02009-03-09 22:52:24 +00001044static void
philippeab1fce92013-09-29 13:47:32 +00001045lc_scan_memory(Addr start, SizeT len, Bool is_prior_definite,
1046 Int clique, Int cur_clique,
philippea22f59d2012-01-26 23:13:52 +00001047 Addr searched, SizeT szB)
njn8225cc02009-03-09 22:52:24 +00001048{
philippe57a16a22012-07-18 22:26:51 +00001049 /* memory scan is based on the assumption that valid pointers are aligned
1050 on a multiple of sizeof(Addr). So, we can (and must) skip the begin and
1051 end portions of the block if they are not aligned on sizeof(Addr):
1052 These cannot be a valid pointer, and calls to MC_(is_valid_aligned_word)
1053 will assert for a non aligned address. */
philippe110c77e2013-10-15 21:04:56 +00001054#if defined(VGA_s390x)
1055 // Define ptr as volatile, as on this platform, the value of ptr
1056 // is read in code executed via a longjmp.
1057 volatile
1058#endif
philippe7a76f4b2013-10-06 21:23:04 +00001059 Addr ptr = VG_ROUNDUP(start, sizeof(Addr));
1060 const Addr end = VG_ROUNDDN(start+len, sizeof(Addr));
philippeb5a02e72015-10-22 19:14:30 +00001061 fault_catcher_t prev_catcher;
sewardjb5f6f512005-03-10 23:59:00 +00001062
1063 if (VG_DEBUG_LEAKCHECK)
njn8225cc02009-03-09 22:52:24 +00001064 VG_(printf)("scan %#lx-%#lx (%lu)\n", start, end, len);
1065
philippeb5a02e72015-10-22 19:14:30 +00001066 prev_catcher = VG_(set_fault_catcher)(lc_scan_memory_fault_catcher);
sewardjb5f6f512005-03-10 23:59:00 +00001067
philippe57a16a22012-07-18 22:26:51 +00001068 /* Optimisation: the loop below will check for each begin
1069 of SM chunk if the chunk is fully unaddressable. The idea is to
1070 skip efficiently such fully unaddressable SM chunks.
florianad4e9792015-07-05 21:53:33 +00001071 So, we preferably start the loop on a chunk boundary.
philippe57a16a22012-07-18 22:26:51 +00001072 If the chunk is not fully unaddressable, we might be in
1073 an unaddressable page. Again, the idea is to skip efficiently
1074 such unaddressable page : this is the "else" part.
1075 We use an "else" so that two consecutive fully unaddressable
1076 SM chunks will be skipped efficiently: first one is skipped
1077 by this piece of code. The next SM chunk will be skipped inside
1078 the loop. */
1079 if ( ! MC_(is_within_valid_secondary)(ptr) ) {
1080 // Skip an invalid SM chunk till the beginning of the next SM Chunk.
1081 ptr = VG_ROUNDUP(ptr+1, SM_SIZE);
1082 } else if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ)) {
1083 // else we are in a (at least partially) valid SM chunk.
1084 // We might be in the middle of an unreadable page.
1085 // Do a cheap check to see if it's valid;
1086 // if not, skip onto the next page.
njn8225cc02009-03-09 22:52:24 +00001087 ptr = VG_PGROUNDUP(ptr+1); // First page is bad - skip it.
philippe57a16a22012-07-18 22:26:51 +00001088 }
philippe7a76f4b2013-10-06 21:23:04 +00001089 /* The above optimisation and below loop is based on some relationships
1090 between VKI_PAGE_SIZE, SM_SIZE and sizeof(Addr) which are asserted in
philippe57a16a22012-07-18 22:26:51 +00001091 MC_(detect_memory_leaks). */
sewardjb5f6f512005-03-10 23:59:00 +00001092
philippeb5a02e72015-10-22 19:14:30 +00001093 // See leak_search_fault_catcher
1094 if (VG_MINIMAL_SETJMP(lc_scan_memory_jmpbuf) != 0) {
philippe7a76f4b2013-10-06 21:23:04 +00001095 // Catch read error ...
philippe110c77e2013-10-15 21:04:56 +00001096# if defined(VGA_s390x)
1097 // For a SIGSEGV, s390 delivers the page address of the bad address.
1098 // For a SIGBUS, old s390 kernels deliver a NULL address.
1099 // bad_scanned_addr can thus not be used.
1100 // So, on this platform, we always skip a full page from ptr.
1101 // The below implies to mark ptr as volatile, as we read the value
1102 // after a longjmp to here.
1103 lc_sig_skipped_szB += VKI_PAGE_SIZE;
1104 ptr = ptr + VKI_PAGE_SIZE; // Unaddressable, - skip it.
1105# else
1106 // On other platforms, just skip one Addr.
philippe7a76f4b2013-10-06 21:23:04 +00001107 lc_sig_skipped_szB += sizeof(Addr);
1108 tl_assert(bad_scanned_addr >= VG_ROUNDUP(start, sizeof(Addr)));
1109 tl_assert(bad_scanned_addr < VG_ROUNDDN(start+len, sizeof(Addr)));
1110 ptr = bad_scanned_addr + sizeof(Addr); // Unaddressable, - skip it.
philippe110c77e2013-10-15 21:04:56 +00001111#endif
philippe7a76f4b2013-10-06 21:23:04 +00001112 }
sewardj05fe85e2005-04-27 22:46:36 +00001113 while (ptr < end) {
sewardjb5f6f512005-03-10 23:59:00 +00001114 Addr addr;
1115
njn8225cc02009-03-09 22:52:24 +00001116 // Skip invalid chunks.
philippe57a16a22012-07-18 22:26:51 +00001117 if (UNLIKELY((ptr % SM_SIZE) == 0)) {
1118 if (! MC_(is_within_valid_secondary)(ptr) ) {
1119 ptr = VG_ROUNDUP(ptr+1, SM_SIZE);
1120 continue;
1121 }
sewardjb5f6f512005-03-10 23:59:00 +00001122 }
1123
njn8225cc02009-03-09 22:52:24 +00001124 // Look to see if this page seems reasonable.
philippe57a16a22012-07-18 22:26:51 +00001125 if (UNLIKELY((ptr % VKI_PAGE_SIZE) == 0)) {
njn8225cc02009-03-09 22:52:24 +00001126 if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ)) {
1127 ptr += VKI_PAGE_SIZE; // Bad page - skip it.
1128 continue;
1129 }
sewardjb5f6f512005-03-10 23:59:00 +00001130 }
1131
philippe57a16a22012-07-18 22:26:51 +00001132 if ( MC_(is_valid_aligned_word)(ptr) ) {
1133 lc_scanned_szB += sizeof(Addr);
philippe7a76f4b2013-10-06 21:23:04 +00001134 // If the below read fails, we will longjmp to the loop begin.
philippe57a16a22012-07-18 22:26:51 +00001135 addr = *(Addr *)ptr;
1136 // If we get here, the scanned word is in valid memory. Now
1137 // let's see if its contents point to a chunk.
1138 if (UNLIKELY(searched)) {
1139 if (addr >= searched && addr < searched + szB) {
philippeab1fce92013-09-29 13:47:32 +00001140 if (addr == searched) {
philippe57a16a22012-07-18 22:26:51 +00001141 VG_(umsg)("*%#lx points at %#lx\n", ptr, searched);
philippeab1fce92013-09-29 13:47:32 +00001142 MC_(pp_describe_addr) (ptr);
1143 } else {
1144 Int ch_no;
1145 MC_Chunk *ch;
1146 LC_Extra *ex;
philippe57a16a22012-07-18 22:26:51 +00001147 VG_(umsg)("*%#lx interior points at %lu bytes inside %#lx\n",
1148 ptr, (long unsigned) addr - searched, searched);
philippeab1fce92013-09-29 13:47:32 +00001149 MC_(pp_describe_addr) (ptr);
1150 if (lc_is_a_chunk_ptr(addr, &ch_no, &ch, &ex) ) {
1151 Int h;
philippe7c69a3e2014-07-21 19:55:11 +00001152 for (h = LchStdString; h < N_LEAK_CHECK_HEURISTICS; h++) {
                        if (heuristic_reachedness(addr, ch, ex, H2S(h)) == h) {
                           VG_(umsg)("block at %#lx considered reachable "
                                     "by ptr %#lx using %s heuristic\n",
                                     ch->data, addr, pp_heuristic(h));
                        }
                     }
                     // Verify the loop above has properly scanned all
                     // heuristics.  If the assert below fails, it probably
                     // means the LeakCheckHeuristic enum is no longer in sync
                     // with the loop above and/or with
                     // N_LEAK_CHECK_HEURISTICS.
                     tl_assert (h == N_LEAK_CHECK_HEURISTICS);
                  }
               }
            }
         } else {
            lc_push_if_a_chunk_ptr(addr, clique, cur_clique, is_prior_definite);
         }
      } else if (0 && VG_DEBUG_LEAKCHECK) {
         VG_(printf)("%#lx not valid\n", ptr);
      }
      ptr += sizeof(Addr);
   }

   VG_(set_fault_catcher)(prev_catcher);
}


// Process the mark stack until empty.
static void lc_process_markstack(Int clique)
{
   Int  top = -1;      // shut gcc up
   Bool is_prior_definite;

   while (lc_pop(&top)) {
      tl_assert(top >= 0 && top < lc_n_chunks);

      // See comment about 'is_prior_definite' at the top to understand this.
      is_prior_definite = ( Possible != lc_extras[top].state );

      lc_scan_memory(lc_chunks[top]->data, lc_chunks[top]->szB,
                     is_prior_definite, clique, (clique == -1 ? -1 : top),
                     /*searched*/ 0, 0);
   }
}

static Word cmp_LossRecordKey_LossRecord(const void* key, const void* elem)
{
   const LossRecordKey* a = key;
   const LossRecordKey* b = &(((const LossRecord*)elem)->key);

   // Compare on states first because that's fast.
   if (a->state < b->state) return -1;
   if (a->state > b->state) return  1;
   // Ok, the states are equal.  Now compare the locations, which is slower.
   if (VG_(eq_ExeContext)(
            MC_(clo_leak_resolution), a->allocated_at, b->allocated_at))
      return 0;
   // Different locations.  Ordering is arbitrary, just use the ec pointer.
   if (a->allocated_at < b->allocated_at) return -1;
   if (a->allocated_at > b->allocated_at) return  1;
   VG_(tool_panic)("bad LossRecord comparison");
}

static Int cmp_LossRecords(const void* va, const void* vb)
{
   const LossRecord* lr_a = *(const LossRecord *const *)va;
   const LossRecord* lr_b = *(const LossRecord *const *)vb;
   SizeT total_szB_a = lr_a->szB + lr_a->indirect_szB;
   SizeT total_szB_b = lr_b->szB + lr_b->indirect_szB;

   // First compare by sizes.
   if (total_szB_a < total_szB_b) return -1;
   if (total_szB_a > total_szB_b) return  1;
   // If the sizes are equal, compare by states.
   if (lr_a->key.state < lr_b->key.state) return -1;
   if (lr_a->key.state > lr_b->key.state) return  1;
   // If they're still equal here, it doesn't matter that much, but we keep
   // comparing other things so that regtests are as deterministic as
   // possible.  So: compare num_blocks.
   if (lr_a->num_blocks < lr_b->num_blocks) return -1;
   if (lr_a->num_blocks > lr_b->num_blocks) return  1;
   // Finally, compare ExeContext addresses... older ones are likely to have
   // lower addresses.
   if (lr_a->key.allocated_at < lr_b->key.allocated_at) return -1;
   if (lr_a->key.allocated_at > lr_b->key.allocated_at) return  1;
   return 0;
}
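
// Illustrative ordering example (invented numbers): with records A =
// (total 16 bytes, Possible, 4 blocks), B = (total 16 bytes, Unreached,
// 1 block) and C = (total 400 bytes, Reachable, 2 blocks), VG_(ssort)
// with this comparator yields A, B, C: total size decides first, the
// Reachedness value breaks the 16-byte tie (Possible < Unreached), and
// num_blocks would only matter if the states were also equal.  Records
// are therefore printed in increasing size order, biggest last.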

// Allocates or reallocates lr_array, and sets its elements to the loss
// records contained in lr_table.
static UInt get_lr_array_from_lr_table(void) {
   UInt i, n_lossrecords;
   LossRecord* lr;

   n_lossrecords = VG_(OSetGen_Size)(lr_table);

   // (re-)create the array of pointers to the loss records.
   // lr_array is kept to allow producing the block list from gdbserver.
   if (lr_array != NULL)
      VG_(free)(lr_array);
   lr_array = VG_(malloc)("mc.pr.2", n_lossrecords * sizeof(LossRecord*));
   i = 0;
   VG_(OSetGen_ResetIter)(lr_table);
   while ( (lr = VG_(OSetGen_Next)(lr_table)) ) {
      lr_array[i++] = lr;
   }
   tl_assert(i == n_lossrecords);
   return n_lossrecords;
}


static void get_printing_rules(LeakCheckParams* lcp,
                               LossRecord*  lr,
                               Bool* count_as_error,
                               Bool* print_record)
{
   // Rules for printing:
   // - We don't show suppressed loss records ever (and that's controlled
   //   within the error manager).
   // - We show non-suppressed loss records that are specified in
   //   --show-leak-kinds=... if --leak-check=yes.

   Bool delta_considered;

   switch (lcp->deltamode) {
   case LCD_Any:
      delta_considered = lr->num_blocks > 0;
      break;
   case LCD_Increased:
      delta_considered
         =  lr->szB > lr->old_szB
         || lr->indirect_szB > lr->old_indirect_szB
         || lr->num_blocks > lr->old_num_blocks;
      break;
   case LCD_Changed:
      delta_considered = lr->szB != lr->old_szB
         || lr->indirect_szB != lr->old_indirect_szB
         || lr->num_blocks != lr->old_num_blocks;
      break;
   default:
      tl_assert(0);
   }

   *print_record = lcp->mode == LC_Full && delta_considered
      && RiS(lr->key.state,lcp->show_leak_kinds);
   // We don't count leaks as errors with lcp->mode==LC_Summary:
   // otherwise you can get high error counts with few or no error
   // messages, which can be confusing.  With LC_Full, we count as errors
   // the leak kinds requested by --errors-for-leak-kinds=...
   *count_as_error = lcp->mode == LC_Full && delta_considered
      && RiS(lr->key.state,lcp->errors_for_leak_kinds);
}
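
// Worked example (invented numbers): a record with old_szB == 100 and
// old_num_blocks == 2 that now has szB == 100 and num_blocks == 1 is
// considered under LCD_Any (blocks remain) and LCD_Changed (num_blocks
// changed), but not under LCD_Increased (nothing grew).  This is what
// lets a delta leak search (e.g. the 'increased' argument of the
// leak_check monitor command, syntax as per the Memcheck manual) report
// only new or growing leaks.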

//
// Types and functions for xtree leak report.
//

static XTree* leak_xt;

/* Sizes and delta sizes for a loss record output in an xtree.
   As the output format can only show positive values, we need values for
   the increase and decrease cases. */
typedef
   struct _XT_BIBK {
      ULong szB;          // Current values
      ULong indirect_szB;
      ULong num_blocks;
   } XT_BIBK; // Bytes, Indirect bytes, BlocKs

typedef
   enum {
      XT_Value    =0,
      XT_Increase =1,
      XT_Decrease =2
   }
   XT_VID; // Value or Increase or Decrease

typedef
   struct _XT_lr {
      XT_BIBK vid[3]; // indexed by XT_VID
   } XT_lr;

typedef
   struct _XT_Leak {
      XT_lr xt_lr[4]; // indexed by Reachedness
   } XT_Leak;

static void MC_(XT_Leak_init)(void* xtl)
{
   VG_(memset) (xtl, 0, sizeof(XT_Leak));
}
static void MC_(XT_Leak_add) (void* to, const void* xtleak)
{
   XT_Leak* xto = to;
   const XT_Leak* xtl = xtleak;

   for (int r = Reachable; r <= Unreached; r++)
      for (int d = 0; d < 3; d++) {
         xto->xt_lr[r].vid[d].szB          += xtl->xt_lr[r].vid[d].szB;
         xto->xt_lr[r].vid[d].indirect_szB += xtl->xt_lr[r].vid[d].indirect_szB;
         xto->xt_lr[r].vid[d].num_blocks   += xtl->xt_lr[r].vid[d].num_blocks;
      }
}
static void XT_insert_lr (LossRecord* lr)
{
   XT_Leak xtl;
   Reachedness i = lr->key.state;

   MC_(XT_Leak_init)(&xtl);

   xtl.xt_lr[i].vid[XT_Value].szB          = lr->szB;
   xtl.xt_lr[i].vid[XT_Value].indirect_szB = lr->indirect_szB;
   xtl.xt_lr[i].vid[XT_Value].num_blocks   = lr->num_blocks;

   if (lr->szB > lr->old_szB)
      xtl.xt_lr[i].vid[XT_Increase].szB = lr->szB - lr->old_szB;
   else
      xtl.xt_lr[i].vid[XT_Decrease].szB = lr->old_szB - lr->szB;
   if (lr->indirect_szB > lr->old_indirect_szB)
      xtl.xt_lr[i].vid[XT_Increase].indirect_szB
         = lr->indirect_szB - lr->old_indirect_szB;
   else
      xtl.xt_lr[i].vid[XT_Decrease].indirect_szB
         = lr->old_indirect_szB - lr->indirect_szB;
   if (lr->num_blocks > lr->old_num_blocks)
      xtl.xt_lr[i].vid[XT_Increase].num_blocks
         = lr->num_blocks - lr->old_num_blocks;
   else
      xtl.xt_lr[i].vid[XT_Decrease].num_blocks
         = lr->old_num_blocks - lr->num_blocks;

   VG_(XT_add_to_ec)(leak_xt, lr->key.allocated_at, &xtl);
}

static void MC_(XT_Leak_sub) (void* from, const void* xtleak)
{
   tl_assert(0); // Should not be called.
}
static const HChar* MC_(XT_Leak_img) (const void* xtleak)
{
   static XT_Leak zero;
   static HChar buf[600];
   UInt off = 0;

   const XT_Leak* xtl = xtleak;

   if (VG_(memcmp)(xtl, &zero, sizeof(XT_Leak)) != 0) {
      for (UInt d = XT_Value; d <= XT_Decrease; d++) {
         // print szB.  We add indirect_szB so that Unreached shows the
         // total bytes lost, including indirect loss.  This is similar
         // to the textual and xml reports.
         for (UInt r = Reachable; r <= Unreached; r++)
            off += VG_(sprintf) (buf + off, " %llu",
                                 xtl->xt_lr[r].vid[d].szB
                                 + xtl->xt_lr[r].vid[d].indirect_szB);
         // print indirect_szB, only for the reachedness values that have
         // such a count (i.e. Unreached).
         for (UInt r = Reachable; r <= Unreached; r++)
            if (r == Unreached)
               off += VG_(sprintf) (buf + off, " %llu",
                                    xtl->xt_lr[r].vid[d].indirect_szB);
         // print num_blocks
         for (UInt r = Reachable; r <= Unreached; r++)
            off += VG_(sprintf) (buf + off, " %llu",
                                 xtl->xt_lr[r].vid[d].num_blocks);
      }
      return buf + 1; // + 1 to skip the useless first space
   } else {
      return NULL;
   }
}
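
// Layout note, derived from the loops above: a non-zero XT_Leak image
// is 27 space-separated counters -- for each of XT_Value, XT_Increase
// and XT_Decrease in turn: 4 byte counts (Reachable..Unreached, each
// with its indirect bytes folded in), then 1 indirect byte count
// (Unreached only), then 4 block counts.  The order matches the 27
// event names declared in XT_Leak_events below.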

/* The short event name is made of 2 or 3 or 4 letters:
     an optional delta indication:  i = increase  d = decrease
     a loss kind: R = Reachable P = Possibly I = Indirectly D = Definitely
     an optional i to indicate this loss record has indirectly lost bytes
     B = Bytes or Bk = Blocks.
   Note that indirectly lost bytes/blocks can thus be counted in 2
   loss records: the loss records for their "own" allocation stack trace,
   and the loss record of the 'main' Definitely or Possibly loss record
   in the indirectly lost count for these loss records. */
static const HChar* XT_Leak_events =
   ////// XT_Value szB
   "RB : Reachable Bytes"                             ","
   "PB : Possibly lost Bytes"                         ","
   "IB : Indirectly lost Bytes"                       ","
   "DB : Definitely lost Bytes (direct plus indirect)" ","

   ////// XT_Value indirect_szB
   // no RIB
   // no PIB
   // no IIB
   "DIB : Definitely Indirectly lost Bytes (subset of DB)" ","

   ////// XT_Value num_blocks
   "RBk : Reachable Blocks"                           ","
   "PBk : Possibly lost Blocks"                       ","
   "IBk : Indirectly lost Blocks"                     ","
   "DBk : Definitely lost Blocks"                     ","

   ////// XT_Increase szB
   "iRB : increase Reachable Bytes"                   ","
   "iPB : increase Possibly lost Bytes"               ","
   "iIB : increase Indirectly lost Bytes"             ","
   "iDB : increase Definitely lost Bytes"             ","

   ////// XT_Increase indirect_szB
   // no iRIB
   // no iPIB
   // no iIIB
   "iDIB : increase Definitely Indirectly lost Bytes" ","

   ////// XT_Increase num_blocks
   "iRBk : increase Reachable Blocks"                 ","
   "iPBk : increase Possibly lost Blocks"             ","
   "iIBk : increase Indirectly lost Blocks"           ","
   "iDBk : increase Definitely lost Blocks"           ","


   ////// XT_Decrease szB
   "dRB : decrease Reachable Bytes"                   ","
   "dPB : decrease Possibly lost Bytes"               ","
   "dIB : decrease Indirectly lost Bytes"             ","
   "dDB : decrease Definitely lost Bytes"             ","

   ////// XT_Decrease indirect_szB
   // no dRIB
   // no dPIB
   // no dIIB
   "dDIB : decrease Definitely Indirectly lost Bytes" ","

   ////// XT_Decrease num_blocks
   "dRBk : decrease Reachable Blocks"                 ","
   "dPBk : decrease Possibly lost Blocks"             ","
   "dIBk : decrease Indirectly lost Blocks"           ","
   "dDBk : decrease Definitely lost Blocks";

static void print_results(ThreadId tid, LeakCheckParams* lcp)
{
   Int          i, n_lossrecords, start_lr_output_scan;
   LossRecord*  lr;
   Bool         is_suppressed;
   /* old_* variables are used to report delta in summary. */
   SizeT        old_bytes_leaked      = MC_(bytes_leaked);
   SizeT        old_bytes_indirect    = MC_(bytes_indirect);
   SizeT        old_bytes_dubious     = MC_(bytes_dubious);
   SizeT        old_bytes_reachable   = MC_(bytes_reachable);
   SizeT        old_bytes_suppressed  = MC_(bytes_suppressed);
   SizeT        old_blocks_leaked     = MC_(blocks_leaked);
   SizeT        old_blocks_indirect   = MC_(blocks_indirect);
   SizeT        old_blocks_dubious    = MC_(blocks_dubious);
   SizeT        old_blocks_reachable  = MC_(blocks_reachable);
   SizeT        old_blocks_suppressed = MC_(blocks_suppressed);

   SizeT old_bytes_heuristically_reachable[N_LEAK_CHECK_HEURISTICS];
   SizeT old_blocks_heuristically_reachable[N_LEAK_CHECK_HEURISTICS];

   for (i = 0; i < N_LEAK_CHECK_HEURISTICS; i++) {
      old_bytes_heuristically_reachable[i]
         = MC_(bytes_heuristically_reachable)[i];
      MC_(bytes_heuristically_reachable)[i] = 0;
      old_blocks_heuristically_reachable[i]
         = MC_(blocks_heuristically_reachable)[i];
      MC_(blocks_heuristically_reachable)[i] = 0;
   }

   if (lr_table == NULL)
      // Create the lr_table, which holds the loss records.
      // If the lr_table already exists, it means it contains
      // loss_records from the previous leak search.  The old_*
      // values in these records are used to implement the
      // leak check delta mode.
      lr_table =
         VG_(OSetGen_Create)(offsetof(LossRecord, key),
                             cmp_LossRecordKey_LossRecord,
                             VG_(malloc), "mc.pr.1",
                             VG_(free));

   // If we have loss records from a previous search, reset values to have
   // proper printing of the deltas between previous search and this search.
   n_lossrecords = get_lr_array_from_lr_table();
   for (i = 0; i < n_lossrecords; i++) {
      if (lr_array[i]->num_blocks == 0) {
         // remove from lr_table the old loss_records with 0 bytes found
         VG_(OSetGen_Remove) (lr_table, &lr_array[i]->key);
         VG_(OSetGen_FreeNode)(lr_table, lr_array[i]);
      } else {
         // move the leak sizes to old_* and zero the current sizes
         // for next leak search
         lr_array[i]->old_szB          = lr_array[i]->szB;
         lr_array[i]->old_indirect_szB = lr_array[i]->indirect_szB;
         lr_array[i]->old_num_blocks   = lr_array[i]->num_blocks;
         lr_array[i]->szB              = 0;
         lr_array[i]->indirect_szB     = 0;
         lr_array[i]->num_blocks       = 0;
      }
   }
   // lr_array now contains "invalid" loss records => free it.
   // lr_array will be re-created below with the kept and new loss records.
   VG_(free) (lr_array);
   lr_array = NULL;

   // Convert the chunks into loss records, merging them where appropriate.
   for (i = 0; i < lc_n_chunks; i++) {
      MC_Chunk*     ch = lc_chunks[i];
      LC_Extra*     ex = &(lc_extras)[i];
      LossRecord*   old_lr;
      LossRecordKey lrkey;
      lrkey.state        = ex->state;
      lrkey.allocated_at = MC_(allocated_at)(ch);

      if (ex->heuristic) {
         MC_(bytes_heuristically_reachable)[ex->heuristic] += ch->szB;
         MC_(blocks_heuristically_reachable)[ex->heuristic]++;
         if (VG_DEBUG_LEAKCHECK)
            VG_(printf)("heuristic %s %#lx len %lu\n",
                        pp_heuristic(ex->heuristic),
                        ch->data, (SizeT)ch->szB);
      }

      old_lr = VG_(OSetGen_Lookup)(lr_table, &lrkey);
      if (old_lr) {
         // We found an existing loss record matching this chunk.  Update the
         // loss record's details in-situ.  This is safe because we don't
         // change the elements used as the OSet key.
         old_lr->szB          += ch->szB;
         if (ex->state == Unreached)
            old_lr->indirect_szB += ex->IorC.indirect_szB;
         old_lr->num_blocks++;
      } else {
         // No existing loss record matches this chunk.  Create a new loss
         // record, initialise it from the chunk, and insert it into lr_table.
         lr = VG_(OSetGen_AllocNode)(lr_table, sizeof(LossRecord));
         lr->key              = lrkey;
         lr->szB              = ch->szB;
         if (ex->state == Unreached)
            lr->indirect_szB  = ex->IorC.indirect_szB;
         else
            lr->indirect_szB  = 0;
         lr->num_blocks       = 1;
         lr->old_szB          = 0;
         lr->old_indirect_szB = 0;
         lr->old_num_blocks   = 0;
         VG_(OSetGen_Insert)(lr_table, lr);
      }
   }

   // (re-)create the array of pointers to the (new) loss records.
   n_lossrecords = get_lr_array_from_lr_table ();
   tl_assert(VG_(OSetGen_Size)(lr_table) == n_lossrecords);

   // Sort the array by loss record sizes.
   VG_(ssort)(lr_array, n_lossrecords, sizeof(LossRecord*),
              cmp_LossRecords);

   // Zero totals.
   MC_(blocks_leaked)     = MC_(bytes_leaked)     = 0;
   MC_(blocks_indirect)   = MC_(bytes_indirect)   = 0;
   MC_(blocks_dubious)    = MC_(bytes_dubious)    = 0;
   MC_(blocks_reachable)  = MC_(bytes_reachable)  = 0;
   MC_(blocks_suppressed) = MC_(bytes_suppressed) = 0;

   // If there is a maximum nr of loss records we can output, then first
   // compute from where the output scan has to start.
   // By default, start from the first loss record.  Compute a higher
   // value if there is a maximum to respect.  We need to print the last
   // records, as the ones with the biggest sizes are more interesting.
   start_lr_output_scan = 0;
   if (lcp->mode == LC_Full && lcp->max_loss_records_output < n_lossrecords) {
      Int nr_printable_records = 0;
      for (i = n_lossrecords - 1; i >= 0 && start_lr_output_scan == 0; i--) {
         Bool count_as_error, print_record;
         lr = lr_array[i];
         get_printing_rules (lcp, lr, &count_as_error, &print_record);
         // Do not use get_printing_rules results for is_suppressed, as we
         // only want to check if the record would be suppressed.
         is_suppressed =
            MC_(record_leak_error) ( tid, i+1, n_lossrecords, lr,
                                     False /* print_record */,
                                     False /* count_as_error */);
         if (print_record && !is_suppressed) {
            nr_printable_records++;
            if (nr_printable_records == lcp->max_loss_records_output)
               start_lr_output_scan = i;
         }
      }
   }

   if (lcp->xt_filename != NULL)
      leak_xt = VG_(XT_create) (VG_(malloc),
                                "mc_leakcheck.leak_xt",
                                VG_(free),
                                sizeof(XT_Leak),
                                MC_(XT_Leak_init),
                                MC_(XT_Leak_add),
                                MC_(XT_Leak_sub),
                                VG_(XT_filter_maybe_below_main));

   // Print the loss records (in size order) and collect summary stats.
   for (i = start_lr_output_scan; i < n_lossrecords; i++) {
      Bool count_as_error, print_record;
      lr = lr_array[i];
      get_printing_rules(lcp, lr, &count_as_error, &print_record);
      is_suppressed =
         MC_(record_leak_error)
            ( tid, i+1, n_lossrecords, lr,
              lcp->xt_filename == NULL ? print_record : False,
              count_as_error );
      if (lcp->xt_filename != NULL && !is_suppressed && print_record)
         XT_insert_lr (lr);

      if (is_suppressed) {
         MC_(blocks_suppressed) += lr->num_blocks;
         MC_(bytes_suppressed)  += lr->szB;

      } else if (Unreached == lr->key.state) {
         MC_(blocks_leaked)     += lr->num_blocks;
         MC_(bytes_leaked)      += lr->szB;

      } else if (IndirectLeak == lr->key.state) {
         MC_(blocks_indirect)   += lr->num_blocks;
         MC_(bytes_indirect)    += lr->szB;

      } else if (Possible == lr->key.state) {
         MC_(blocks_dubious)    += lr->num_blocks;
         MC_(bytes_dubious)     += lr->szB;

      } else if (Reachable == lr->key.state) {
         MC_(blocks_reachable)  += lr->num_blocks;
         MC_(bytes_reachable)   += lr->szB;

      } else {
         VG_(tool_panic)("unknown loss mode");
      }
   }

   if (lcp->xt_filename != NULL) {
      VG_(XT_callgrind_print)(leak_xt,
                              lcp->xt_filename,
                              XT_Leak_events,
                              MC_(XT_Leak_img));
      if (VG_(clo_verbosity) >= 1 || lcp->requested_by_monitor_command)
         VG_(umsg)("xtree leak report: %s\n", lcp->xt_filename);
      VG_(XT_delete)(leak_xt);
   }
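
   // For reference (option spellings as documented in recent Valgrind
   // manuals, so treat this as a sketch rather than a contract): running
   //    valgrind --leak-check=full --xtree-leak=yes ./myprog
   // makes lcp->xt_filename non-NULL and writes the report to the default
   // file xtleak.kcg.%p, overridable with --xtree-leak-file=<filename>;
   // the result can then be inspected with callgrind_annotate or
   // KCachegrind.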

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
      HChar d_bytes[31];
      HChar d_blocks[31];
#     define DBY(new,old) \
      MC_(snprintf_delta) (d_bytes, sizeof(d_bytes), (new), (old), \
                           lcp->deltamode)
#     define DBL(new,old) \
      MC_(snprintf_delta) (d_blocks, sizeof(d_blocks), (new), (old), \
                           lcp->deltamode)

      VG_(umsg)("LEAK SUMMARY:\n");
      VG_(umsg)("   definitely lost: %'lu%s bytes in %'lu%s blocks\n",
                MC_(bytes_leaked),
                DBY (MC_(bytes_leaked), old_bytes_leaked),
                MC_(blocks_leaked),
                DBL (MC_(blocks_leaked), old_blocks_leaked));
      VG_(umsg)("   indirectly lost: %'lu%s bytes in %'lu%s blocks\n",
                MC_(bytes_indirect),
                DBY (MC_(bytes_indirect), old_bytes_indirect),
                MC_(blocks_indirect),
                DBL (MC_(blocks_indirect), old_blocks_indirect));
      VG_(umsg)("     possibly lost: %'lu%s bytes in %'lu%s blocks\n",
                MC_(bytes_dubious),
                DBY (MC_(bytes_dubious), old_bytes_dubious),
                MC_(blocks_dubious),
                DBL (MC_(blocks_dubious), old_blocks_dubious));
      VG_(umsg)("   still reachable: %'lu%s bytes in %'lu%s blocks\n",
                MC_(bytes_reachable),
                DBY (MC_(bytes_reachable), old_bytes_reachable),
                MC_(blocks_reachable),
                DBL (MC_(blocks_reachable), old_blocks_reachable));
      for (i = 0; i < N_LEAK_CHECK_HEURISTICS; i++)
         if (old_blocks_heuristically_reachable[i] > 0
             || MC_(blocks_heuristically_reachable)[i] > 0) {
            VG_(umsg)("                      of which "
                      "reachable via heuristic:\n");
            break;
         }
      for (i = 0; i < N_LEAK_CHECK_HEURISTICS; i++)
         if (old_blocks_heuristically_reachable[i] > 0
             || MC_(blocks_heuristically_reachable)[i] > 0)
            VG_(umsg)("                        %-19s: "
                      "%'lu%s bytes in %'lu%s blocks\n",
                      pp_heuristic(i),
                      MC_(bytes_heuristically_reachable)[i],
                      DBY (MC_(bytes_heuristically_reachable)[i],
                           old_bytes_heuristically_reachable[i]),
                      MC_(blocks_heuristically_reachable)[i],
                      DBL (MC_(blocks_heuristically_reachable)[i],
                           old_blocks_heuristically_reachable[i]));
      VG_(umsg)("        suppressed: %'lu%s bytes in %'lu%s blocks\n",
                MC_(bytes_suppressed),
                DBY (MC_(bytes_suppressed), old_bytes_suppressed),
                MC_(blocks_suppressed),
                DBL (MC_(blocks_suppressed), old_blocks_suppressed));
      if (lcp->mode != LC_Full &&
          (MC_(blocks_leaked) + MC_(blocks_indirect) +
           MC_(blocks_dubious) + MC_(blocks_reachable)) > 0) {
         if (lcp->requested_by_monitor_command)
            VG_(umsg)("To see details of leaked memory, "
                      "give 'full' arg to leak_check\n");
         else
            VG_(umsg)("Rerun with --leak-check=full to see details "
                      "of leaked memory\n");
      }
      if (lcp->mode == LC_Full &&
          MC_(blocks_reachable) > 0 && !RiS(Reachable,lcp->show_leak_kinds)) {
         VG_(umsg)("Reachable blocks (those to which a pointer "
                   "was found) are not shown.\n");
         if (lcp->requested_by_monitor_command)
            VG_(umsg)("To see them, add 'reachable any' args to leak_check\n");
         else
            VG_(umsg)("To see them, rerun with: --leak-check=full "
                      "--show-leak-kinds=all\n");
      }
      VG_(umsg)("\n");
      #undef DBL
      #undef DBY
   }
}
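
// For reference, the summary printed above looks like this (numbers
// invented; the %s delta annotations such as "(+16)" only appear for a
// delta leak search):
//    LEAK SUMMARY:
//       definitely lost: 48 bytes in 2 blocks
//       indirectly lost: 32 bytes in 2 blocks
//         possibly lost: 0 bytes in 0 blocks
//       still reachable: 1,024 bytes in 1 blocks
//            suppressed: 0 bytes in 0 blocks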

// Recursively print all the indirectly leaked blocks collected in clique.
// Printing stops when *remaining reaches 0.
static void print_clique (Int clique, UInt level, UInt *remaining)
{
   Int ind;
   UInt i, n_lossrecords;

   n_lossrecords = VG_(OSetGen_Size)(lr_table);

   for (ind = 0; ind < lc_n_chunks && *remaining > 0; ind++) {
      LC_Extra* ind_ex = &(lc_extras)[ind];
      if (ind_ex->state == IndirectLeak
          && ind_ex->IorC.clique == (SizeT) clique) {
         MC_Chunk*     ind_ch = lc_chunks[ind];
         LossRecord*   ind_lr;
         LossRecordKey ind_lrkey;
         UInt lr_i;
         ind_lrkey.state = ind_ex->state;
         ind_lrkey.allocated_at = MC_(allocated_at)(ind_ch);
         ind_lr = VG_(OSetGen_Lookup)(lr_table, &ind_lrkey);
         for (lr_i = 0; lr_i < n_lossrecords; lr_i++)
            if (ind_lr == lr_array[lr_i])
               break;
         for (i = 0; i < level; i++)
            VG_(umsg)(" ");
         VG_(umsg)("%p[%lu] indirect loss record %u\n",
                   (void *)ind_ch->data, (SizeT)ind_ch->szB,
                   lr_i+1); // lr_i+1 for user numbering.
         (*remaining)--;
         if (lr_i >= n_lossrecords)
            VG_(umsg)
               ("error: no indirect loss record found for %p[%lu]?????\n",
                (void *)ind_ch->data, (SizeT)ind_ch->szB);
         print_clique(ind, level+1, remaining);
      }
   }
}

Bool MC_(print_block_list) ( UInt loss_record_nr_from,
                             UInt loss_record_nr_to,
                             UInt max_blocks,
                             UInt heuristics)
{
   UInt loss_record_nr;
   UInt i, n_lossrecords;
   LossRecord* lr;
   Bool lr_printed;
   UInt remaining = max_blocks;

   if (lr_table == NULL || lc_chunks == NULL || lc_extras == NULL) {
      VG_(umsg)("Can't print block list : no valid leak search result\n");
      return False;
   }

   if (lc_chunks_n_frees_marker != MC_(get_cmalloc_n_frees)()) {
      VG_(umsg)("Can't print obsolete block list : redo a leak search first\n");
      return False;
   }

   n_lossrecords = VG_(OSetGen_Size)(lr_table);
   if (loss_record_nr_from >= n_lossrecords)
      return False; // Invalid starting loss record nr.

   if (loss_record_nr_to >= n_lossrecords)
      loss_record_nr_to = n_lossrecords - 1;

   tl_assert (lr_array);

   for (loss_record_nr = loss_record_nr_from;
        loss_record_nr <= loss_record_nr_to && remaining > 0;
        loss_record_nr++) {
      lr = lr_array[loss_record_nr];
      lr_printed = False;

      /* If the user asks to print a specific loss record, we print
         the block details, even if no block will be shown for this lr.
         If the user asks to print a range of lr, we only print lr details
         when at least one block is shown. */
      if (loss_record_nr_from == loss_record_nr_to) {
         /* (+1 on loss_record_nr as user numbering for loss records
            starts at 1). */
         MC_(pp_LossRecord)(loss_record_nr+1, n_lossrecords, lr);
         lr_printed = True;
      }

      // Match the chunks with loss records.
      for (i = 0; i < lc_n_chunks && remaining > 0; i++) {
         MC_Chunk*     ch = lc_chunks[i];
         LC_Extra*     ex = &(lc_extras)[i];
         LossRecord*   old_lr;
         LossRecordKey lrkey;
         lrkey.state        = ex->state;
         lrkey.allocated_at = MC_(allocated_at)(ch);

         old_lr = VG_(OSetGen_Lookup)(lr_table, &lrkey);
         if (old_lr) {
            // We found an existing loss record matching this chunk.
            // If this is the loss record we are looking for, output the
            // pointer.
            if (old_lr == lr_array[loss_record_nr]
                && (heuristics == 0 || HiS(ex->heuristic, heuristics))) {
               if (!lr_printed) {
                  MC_(pp_LossRecord)(loss_record_nr+1, n_lossrecords, lr);
                  lr_printed = True;
               }

               if (ex->heuristic)
                  VG_(umsg)("%p[%lu] (found via heuristic %s)\n",
                            (void *)ch->data, (SizeT)ch->szB,
                            pp_heuristic (ex->heuristic));
               else
                  VG_(umsg)("%p[%lu]\n",
                            (void *)ch->data, (SizeT)ch->szB);
               remaining--;
               if (ex->state != Reachable) {
                  // We can print the clique in all states, except Reachable.
                  // In Unreached state, lc_chunk[i] is the clique leader.
                  // In IndirectLeak, lc_chunk[i] might have been a clique
                  // leader which was later collected in another clique.
                  // For Possible, lc_chunk[i] might be the top of a clique
                  // or an intermediate clique.
                  print_clique(i, 1, &remaining);
               }
            }
         } else {
            // No existing loss record matches this chunk ???
            VG_(umsg)("error: no loss record found for %p[%lu]?????\n",
                      (void *)ch->data, (SizeT)ch->szB);
         }
      }
   }
   return True;
}
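
// Typical use is from the gdbserver monitor interface (command names as
// documented in the Memcheck manual), e.g. under gdb:
//    (gdb) monitor leak_check full reachable any
//    (gdb) monitor block_list 7
// where the second command ends up here and lists the addresses of the
// blocks that were merged into loss record 7 of the last leak search.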

// If searched = 0, scan memory root set, pushing onto the mark stack the
// blocks encountered.
// Otherwise (searched != 0), scan the memory root set searching for ptr
// pointing inside [searched, searched+szB[.
static void scan_memory_root_set(Addr searched, SizeT szB)
{
   Int i;
   Int n_seg_starts;
   Addr* seg_starts = VG_(get_segment_starts)( SkFileC | SkAnonC | SkShmC,
                                               &n_seg_starts );

   tl_assert(seg_starts && n_seg_starts > 0);

   lc_scanned_szB = 0;
   lc_sig_skipped_szB = 0;

   // VG_(am_show_nsegments)( 0, "leakcheck");
   for (i = 0; i < n_seg_starts; i++) {
      SizeT seg_size;
      NSegment const* seg = VG_(am_find_nsegment)( seg_starts[i] );
      tl_assert(seg);
      tl_assert(seg->kind == SkFileC || seg->kind == SkAnonC ||
                seg->kind == SkShmC);

      if (!(seg->hasR && seg->hasW))                    continue;
      if (seg->isCH)                                    continue;

      // Don't poke around in device segments as this may cause
      // hangs.  Include /dev/zero just in case someone allocated
      // memory by explicitly mapping /dev/zero.
      if (seg->kind == SkFileC
          && (VKI_S_ISCHR(seg->mode) || VKI_S_ISBLK(seg->mode))) {
         const HChar* dev_name = VG_(am_get_filename)( seg );
         if (dev_name && 0 == VG_(strcmp)(dev_name, "/dev/zero")) {
            // Don't skip /dev/zero.
         } else {
            // Skip this device mapping.
            continue;
         }
      }

      if (0)
         VG_(printf)("ACCEPT %2d %#lx %#lx\n", i, seg->start, seg->end);

      // Scan the segment.  We use -1 for the clique number, because this
      // is a root-set.
      seg_size = seg->end - seg->start + 1;
      if (VG_(clo_verbosity) > 2) {
         VG_(message)(Vg_DebugMsg,
                      "  Scanning root segment: %#lx..%#lx (%lu)\n",
                      seg->start, seg->end, seg_size);
      }
      lc_scan_memory(seg->start, seg_size, /*is_prior_definite*/True,
                     /*clique*/-1, /*cur_clique*/-1,
                     searched, szB);
   }
   VG_(free)(seg_starts);
}

static MC_Mempool *find_mp_of_chunk (MC_Chunk* mc_search)
{
   MC_Mempool* mp;

   tl_assert( MC_(mempool_list) );

   VG_(HT_ResetIter)( MC_(mempool_list) );
   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
      MC_Chunk* mc;
      VG_(HT_ResetIter)(mp->chunks);
      while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
         if (mc == mc_search)
            return mp;
      }
   }

   return NULL;
}

/*------------------------------------------------------------*/
/*--- Top-level entry point.                               ---*/
/*------------------------------------------------------------*/

void MC_(detect_memory_leaks) ( ThreadId tid, LeakCheckParams* lcp)
{
   Int i, j;

   tl_assert(lcp->mode != LC_Off);

   // Verify some assertions which are used in lc_scan_memory.
   tl_assert((VKI_PAGE_SIZE % sizeof(Addr)) == 0);
   tl_assert((SM_SIZE % sizeof(Addr)) == 0);
   // Above two assertions are critical, while below assertion
   // ensures that the optimisation in the loop is done in the
   // correct order : the loop checks for (big) SM chunk skipping
   // before checking for (smaller) page skipping.
   tl_assert((SM_SIZE % VKI_PAGE_SIZE) == 0);

   MC_(leak_search_gen)++;
   MC_(detect_memory_leaks_last_delta_mode) = lcp->deltamode;
   detect_memory_leaks_last_heuristics = lcp->heuristics;

   // Get the chunks, stop if there were none.
   if (lc_chunks) {
      VG_(free)(lc_chunks);
      lc_chunks = NULL;
   }
   lc_chunks = find_active_chunks(&lc_n_chunks);
   lc_chunks_n_frees_marker = MC_(get_cmalloc_n_frees)();
   if (lc_n_chunks == 0) {
      tl_assert(lc_chunks == NULL);
      if (lr_table != NULL) {
         // forget the previously recorded LossRecords, as the next leak
         // search can in any case just create new leaks.
         // Maybe it would be better to call print_results instead?
         // (at least when leak decreases are requested)
         // That would then output all LossRecords with a size decreasing
         // to 0.
         VG_(OSetGen_Destroy) (lr_table);
         lr_table = NULL;
      }
      if (VG_(clo_verbosity) >= 1 && !VG_(clo_xml)) {
         VG_(umsg)("All heap blocks were freed -- no leaks are possible\n");
         VG_(umsg)("\n");
      }
      return;
   }

   // Sort the array so blocks are in ascending order in memory.
   VG_(ssort)(lc_chunks, lc_n_chunks, sizeof(VgHashNode*), compare_MC_Chunks);

   // Sanity check -- make sure they're in order.
   for (i = 0; i < lc_n_chunks-1; i++) {
      tl_assert( lc_chunks[i]->data <= lc_chunks[i+1]->data);
   }

   // Sanity check -- make sure they don't overlap.  One exception is that
   // we allow a MALLOCLIKE block to sit entirely within a malloc() block.
   // This is for bug 100628.  If this occurs, we ignore the malloc() block
   // for leak-checking purposes.  This is a hack and probably should be done
   // better, but at least it's consistent with mempools (which are treated
   // like this in find_active_chunks).  Mempools have a separate VgHashTable
   // for mempool chunks, but if custom-allocated blocks are put in a separate
   // table from normal heap blocks it makes free-mismatch checking more
   // difficult.
   // Another exception: Metapool memory blocks overlap by definition.  The
   // meta-block is allocated (by a custom allocator), and chunks of that
   // block are allocated again for use by the application: Not an error.
   //
   // If this check fails, it probably means that the application
   // has done something stupid with VALGRIND_MALLOCLIKE_BLOCK client
   // requests, eg. has made overlapping requests (which are
   // nonsensical), or used VALGRIND_MALLOCLIKE_BLOCK for stack locations;
   // again nonsensical.
   //
   for (i = 0; i < lc_n_chunks-1; i++) {
      MC_Chunk* ch1 = lc_chunks[i];
      MC_Chunk* ch2 = lc_chunks[i+1];

      Addr start1    = ch1->data;
      Addr start2    = ch2->data;
      Addr end1      = ch1->data + ch1->szB - 1;
      Addr end2      = ch2->data + ch2->szB - 1;
      Bool isCustom1 = ch1->allockind == MC_AllocCustom;
      Bool isCustom2 = ch2->allockind == MC_AllocCustom;

      if (end1 < start2) {
         // Normal case - no overlap.

      // We used to allow exact duplicates, I'm not sure why.  --njn
      //} else if (start1 == start2 && end1 == end2) {
         // Degenerate case: exact duplicates.

      } else if (start1 >= start2 && end1 <= end2 && isCustom1 && !isCustom2) {
         // Block i is MALLOCLIKE and entirely within block i+1.
         // Remove block i+1.
         for (j = i+1; j < lc_n_chunks-1; j++) {
            lc_chunks[j] = lc_chunks[j+1];
         }
         lc_n_chunks--;

      } else if (start2 >= start1 && end2 <= end1 && isCustom2 && !isCustom1) {
         // Block i+1 is MALLOCLIKE and entirely within block i.
         // Remove block i.
         for (j = i; j < lc_n_chunks-1; j++) {
            lc_chunks[j] = lc_chunks[j+1];
         }
         lc_n_chunks--;

      } else {
         // Overlap is allowed ONLY when one of the two candidates is a block
         // from a memory pool that has the metapool attribute set.
         // All other mixtures trigger the error + assert.
         MC_Mempool* mp;
         Bool ch1_is_meta = False, ch2_is_meta = False;
         Bool Inappropriate = False;

         if (MC_(is_mempool_block)(ch1)) {
            mp = find_mp_of_chunk(ch1);
            if (mp && mp->metapool) {
               ch1_is_meta = True;
            }
         }

         if (MC_(is_mempool_block)(ch2)) {
            mp = find_mp_of_chunk(ch2);
            if (mp && mp->metapool) {
               ch2_is_meta = True;
            }
         }

         // If one of the blocks is a meta block, the other must be entirely
         // within that meta block, or something is really wrong with the
         // custom allocator.
         if (ch1_is_meta != ch2_is_meta) {
            if ( (ch1_is_meta && (start2 < start1 || end2 > end1)) ||
                 (ch2_is_meta && (start1 < start2 || end1 > end2)) ) {
               Inappropriate = True;
            }
         }

         if (ch1_is_meta == ch2_is_meta || Inappropriate) {
            VG_(umsg)("Block 0x%lx..0x%lx overlaps with block 0x%lx..0x%lx\n",
                      start1, end1, start2, end2);
            VG_(umsg)("Blocks allocation contexts:\n"),
            VG_(pp_ExeContext)( MC_(allocated_at)(ch1));
            VG_(umsg)("\n"),
            VG_(pp_ExeContext)( MC_(allocated_at)(ch2));
            VG_(umsg)("This is usually caused by using ");
            VG_(umsg)("VALGRIND_MALLOCLIKE_BLOCK in an inappropriate way.\n");
            tl_assert (0);
         }
      }
   }

   // Initialise lc_extras.
   if (lc_extras) {
      VG_(free)(lc_extras);
      lc_extras = NULL;
   }
   lc_extras = VG_(malloc)( "mc.dml.2", lc_n_chunks * sizeof(LC_Extra) );
   for (i = 0; i < lc_n_chunks; i++) {
      lc_extras[i].state        = Unreached;
      lc_extras[i].pending      = False;
      lc_extras[i].heuristic    = LchNone;
      lc_extras[i].IorC.indirect_szB = 0;
   }

   // Initialise lc_markstack.
   lc_markstack = VG_(malloc)( "mc.dml.2", lc_n_chunks * sizeof(Int) );
   for (i = 0; i < lc_n_chunks; i++) {
      lc_markstack[i] = -1;
   }
   lc_markstack_top = -1;

   // Verbosity.
   if (VG_(clo_verbosity) > 1 && !VG_(clo_xml)) {
      VG_(umsg)( "Searching for pointers to %'d not-freed blocks\n",
                 lc_n_chunks );
   }

   // Scan the memory root-set, pushing onto the mark stack any blocks
   // pointed to.
   scan_memory_root_set(/*searched*/0, 0);

   // Scan GP registers for chunk pointers.
   VG_(apply_to_GP_regs)(lc_push_if_a_chunk_ptr_register);

   // Process the pushed blocks.  After this, every block that is reachable
   // from the root-set has been traced.
   lc_process_markstack(/*clique*/-1);

   if (VG_(clo_verbosity) > 1 && !VG_(clo_xml)) {
      VG_(umsg)("Checked %'lu bytes\n", lc_scanned_szB);
      if (lc_sig_skipped_szB > 0)
         VG_(umsg)("Skipped %'lu bytes due to read errors\n",
                   lc_sig_skipped_szB);
      VG_(umsg)( "\n" );
   }

   // Trace all the leaked blocks to determine which are directly leaked and
   // which are indirectly leaked.  For each Unreached block, push it onto
   // the mark stack, and find all the as-yet-Unreached blocks reachable
   // from it.  These form a clique and are marked IndirectLeak, and their
   // size is added to the clique leader's indirect size.  If one of the
   // found blocks was itself a clique leader (from a previous clique), then
   // the cliques are merged.
   for (i = 0; i < lc_n_chunks; i++) {
      MC_Chunk* ch = lc_chunks[i];
      LC_Extra* ex = &(lc_extras[i]);

      if (VG_DEBUG_CLIQUE)
         VG_(printf)("cliques: %d at %#lx -> Loss state %d\n",
                     i, ch->data, ex->state);

      tl_assert(lc_markstack_top == -1);

      if (ex->state == Unreached) {
         if (VG_DEBUG_CLIQUE)
            VG_(printf)("%d: gathering clique %#lx\n", i, ch->data);

         // Push this Unreached block onto the stack and process it.
         lc_push(i, ch);
         lc_process_markstack(/*clique*/i);

         tl_assert(lc_markstack_top == -1);
         tl_assert(ex->state == Unreached);
      }
   }

   print_results( tid, lcp);

   VG_(free) ( lc_markstack );
   lc_markstack = NULL;
   // lc_chunks, lc_extras, lr_array and lr_table are kept (needed if user
   // calls MC_(print_block_list)).  lr_table is also used for delta leak
   // reporting between this leak search and the next leak search.
}
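
// Reminder of how a search reaches this entry point (a sketch; exact
// spellings are those of the Memcheck manual and memcheck.h at the time
// of writing): automatically at exit with --leak-check=full or
// --leak-check=summary, on demand from the program via client requests
// such as VALGRIND_DO_LEAK_CHECK or VALGRIND_DO_CHANGED_LEAK_CHECK, or
// interactively via the "monitor leak_check" gdbserver command.  Each
// route fills in a LeakCheckParams accordingly.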

static Addr searched_wpa;
static SizeT searched_szB;
static void
search_address_in_GP_reg(ThreadId tid, const HChar* regname, Addr addr_in_reg)
{
   if (addr_in_reg >= searched_wpa
       && addr_in_reg < searched_wpa + searched_szB) {
      if (addr_in_reg == searched_wpa)
         VG_(umsg)
            ("tid %u register %s pointing at %#lx\n",
             tid, regname, searched_wpa);
      else
         VG_(umsg)
            ("tid %u register %s interior pointing %lu bytes inside %#lx\n",
             tid, regname, (long unsigned) addr_in_reg - searched_wpa,
             searched_wpa);
   }
}

void MC_(who_points_at) ( Addr address, SizeT szB)
{
   MC_Chunk** chunks;
   Int        n_chunks;
   Int        i;

   if (szB == 1)
      VG_(umsg) ("Searching for pointers to %#lx\n", address);
   else
      VG_(umsg) ("Searching for pointers pointing in %lu bytes from %#lx\n",
                 szB, address);

   chunks = find_active_chunks(&n_chunks);

   // Scan memory root-set, searching for ptr pointing in address[szB]
   scan_memory_root_set(address, szB);

   // Scan active malloc-ed chunks
   for (i = 0; i < n_chunks; i++) {
      lc_scan_memory(chunks[i]->data, chunks[i]->szB,
                     /*is_prior_definite*/True,
                     /*clique*/-1, /*cur_clique*/-1,
                     address, szB);
   }
   VG_(free) ( chunks );

   // Scan GP registers for pointers to address range.
   searched_wpa = address;
   searched_szB = szB;
   VG_(apply_to_GP_regs)(search_address_in_GP_reg);

}
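
// Typical use (gdbserver monitor interface; syntax as documented in the
// Memcheck manual):
//    (gdb) monitor who_points_at 0x4028028 16
// which reports every root-set word, scanned heap word and GP register
// whose value points inside the 16 bytes starting at 0x4028028.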

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/
