
/*--------------------------------------------------------------------*/
/*--- The leak checker.                             mc_leakcheck.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2012 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_vki.h"
#include "pub_tool_aspacehl.h"
#include "pub_tool_aspacemgr.h"
#include "pub_tool_execontext.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_libcsignal.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_oset.h"
#include "pub_tool_poolalloc.h"
#include "pub_tool_signals.h"       // Needed for mc_include.h
#include "pub_tool_libcsetjmp.h"    // setjmp facilities
#include "pub_tool_tooliface.h"     // Needed for mc_include.h

#include "mc_include.h"

/*------------------------------------------------------------*/
/*--- An overview of leak checking.                        ---*/
/*------------------------------------------------------------*/

// Leak-checking is a directed-graph traversal problem.  The graph has
// two kinds of nodes:
// - root-set nodes:
//   - GP registers of all threads;
//   - valid, aligned, pointer-sized data words in valid client memory,
//     including stacks, but excluding words within client heap-allocated
//     blocks (they are excluded so that later on we can differentiate
//     between heap blocks that are indirectly leaked vs. directly leaked).
// - heap-allocated blocks.  A block is a mempool chunk or a malloc chunk
//   that doesn't contain a mempool chunk.  Nb: the terms "blocks" and
//   "chunks" are used interchangeably below.
//
// There are two kinds of edges:
// - start-pointers, i.e. pointers to the start of a block;
// - interior-pointers, i.e. pointers to the interior of a block.
//
// We use "pointers" rather than "edges" below.
//
// Root set nodes only point to blocks.  Blocks only point to blocks;
// a block can point to itself.
//
// The aim is to traverse the graph and determine the status of each block.
//
// There are 9 distinct cases.  See memcheck/docs/mc-manual.xml for details.
// Presenting all nine categories to the user is probably too much.
// Currently we do this:
// - definitely lost:  case 3
// - indirectly lost:  cases 4, 9
// - possibly lost:    cases 5..8
// - still reachable:  cases 1, 2
//
// It's far from clear that this is the best possible categorisation;  it
// has accreted over time without any central guiding principle.
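//
// As a small worked example (illustrative only, not part of Memcheck):
// the following client code produces one directly lost and one
// indirectly lost block.
//
//    #include <stdlib.h>
//    struct node { struct node* next; };
//    int main(void)
//    {
//       struct node* a = malloc(sizeof *a);    // block AAA
//       a->next = malloc(sizeof *a->next);     // block BBB
//       a = NULL;   // AAA becomes definitely lost;  BBB becomes
//                   // indirectly lost, since the only pointer to it
//                   // lives inside AAA.
//       return 0;
//    }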

/*------------------------------------------------------------*/
/*--- XXX: Thoughts for improvement.                       ---*/
/*------------------------------------------------------------*/

// From the user's point of view:
// - If they aren't using interior-pointers, they just have to fix the
//   directly lost blocks, and the indirectly lost ones will be fixed as
//   part of that.  Any possibly lost blocks will just be due to random
//   pointer garbage and can be ignored.
//
// - If they are using interior-pointers, the fact that they currently are
//   not being told which ones might be directly lost vs. indirectly lost
//   makes it hard to know where to begin.
//
// All this makes me wonder if a new option is warranted:
// --follow-interior-pointers.  By default it would be off, the leak
// checker wouldn't follow interior-pointers and there would only be 3
// categories: R, DL, IL.
//
// If turned on, then it would show 7 categories (R, DL, IL, DR/DL, IR/IL,
// IR/IL/DL, IL/DL).  That output is harder to understand but it's your
// own damn fault for using interior-pointers...
//
// ----
//
// Also, why are two blank lines printed between each loss record?
// [bug 197930]
//
// ----
//
// Also, --show-reachable is a bad name because it also turns on the
// showing of indirectly leaked blocks(!)  It would be better named
// --show-all or --show-all-heap-blocks, because that's the end result.
// We now have the option --show-leak-kinds=... which allows specifying
// =all.
//
// ----
//
// Also, the VALGRIND_LEAK_CHECK and VALGRIND_QUICK_LEAK_CHECK aren't
// great names.  VALGRIND_FULL_LEAK_CHECK and VALGRIND_SUMMARY_LEAK_CHECK
// would be better.
//
// ----
//
// Also, VALGRIND_COUNT_LEAKS and VALGRIND_COUNT_LEAK_BLOCKS aren't great
// as they combine direct leaks and indirect leaks into one.  New, more
// precise ones (they'll need new names) would be good.  If more
// categories are used, as per the --follow-interior-pointers option, they
// should be updated accordingly.  And they should use a struct to return
// the values.
//
// ----
//
// Also, for this case:
//
//  (4)  p4      BBB ---> AAA
//
// BBB is definitely directly lost.  AAA is definitely indirectly lost.
// Here's the relevant loss records printed for a full check (each block
// is 16 bytes):
//
// ==20397== 16 bytes in 1 blocks are indirectly lost in loss record 9 of 15
// ==20397==    at 0x4C2694E: malloc (vg_replace_malloc.c:177)
// ==20397==    by 0x400521: mk (leak-cases.c:49)
// ==20397==    by 0x400578: main (leak-cases.c:72)
//
// ==20397== 32 (16 direct, 16 indirect) bytes in 1 blocks are definitely
// lost in loss record 14 of 15
// ==20397==    at 0x4C2694E: malloc (vg_replace_malloc.c:177)
// ==20397==    by 0x400521: mk (leak-cases.c:49)
// ==20397==    by 0x400580: main (leak-cases.c:72)
//
// The first one is fine -- it describes AAA.
//
// The second one is for BBB.  It's correct in that 16 bytes in 1 block
// are directly lost.  It's also correct that 16 are indirectly lost as a
// result, but it means that AAA is being counted twice in the loss
// records.  (It's not, thankfully, counted twice in the summary counts.)
// Argh.
//
// This would be less confusing for the second one:
//
// ==20397== 16 bytes in 1 blocks are definitely lost in loss record 14
// of 15 (and 16 bytes in 1 block are indirectly lost as a result;  they
// are mentioned elsewhere (if --show-reachable=yes or indirect is given
// in --show-leak-kinds=... !))
// ==20397==    at 0x4C2694E: malloc (vg_replace_malloc.c:177)
// ==20397==    by 0x400521: mk (leak-cases.c:49)
// ==20397==    by 0x400580: main (leak-cases.c:72)
//
// But ideally we'd present the loss record for the directly lost block
// and then the resultant indirectly lost blocks and make the dependence
// clear.  Double argh.

/*------------------------------------------------------------*/
/*--- The actual algorithm.                                ---*/
/*------------------------------------------------------------*/

// - Find all the blocks (a.k.a. chunks) to check.  Mempool chunks require
//   some special treatment because they can be within malloc'd blocks.
// - Scan every word in the root set (GP registers and valid
//   non-heap memory words).
//   - First, we skip if it doesn't point to valid memory.
//   - Then, we see if it points to the start or interior of a block.  If
//     so, we push the block onto the mark stack and mark it as having been
//     reached.
// - Then, we process the mark stack, repeating the scanning for each block;
//   this can push more blocks onto the mark stack.  We repeat until the
//   mark stack is empty.  Each block is marked as definitely or possibly
//   reachable, depending on whether interior-pointers were required to
//   reach it.
// - At this point we know for every block if it's reachable or not.
// - We then push each unreached block onto the mark stack, using the block
//   number as the "clique" number.
// - We process the mark stack again, this time grouping blocks into cliques
//   in order to facilitate the directly/indirectly lost categorisation.
// - We group blocks by their ExeContexts and categorisation, and print them
//   if --leak-check=full.  We also print summary numbers.
//
// A note on "cliques":
// - A directly lost block is one with no pointers to it.  An indirectly
//   lost block is one that is pointed to by a directly or indirectly lost
//   block.
// - Each directly lost block has zero or more indirectly lost blocks
//   hanging off it.  All these blocks together form a "clique".  The
//   directly lost block is called the "clique leader".  The clique number
//   is the number (in lc_chunks[]) of the clique leader.
// - Actually, a directly lost block may be pointed to if it's part of a
//   cycle.  In that case, there may be more than one choice for the clique
//   leader, and the choice is arbitrary.  Eg. if you have A-->B and B-->A,
//   either A or B could be the clique leader.
// - Cliques cannot overlap, and will be truncated to avoid this.  Eg. if
//   we have A-->C and B-->C, the two cliques will be {A,C} and {B}, or {A}
//   and {B,C} (again the choice is arbitrary).  This is because we don't
//   want to count a block as indirectly lost more than once.
//
// A note on 'is_prior_definite':
// - This is a boolean used in various places that indicates if the chain
//   up to the prior node (prior to the one being considered) is definite.
// - In the clique == -1 case:
//   - if True it means that the prior node is a root-set node, or that the
//     prior node is a block which is reachable from the root-set via
//     start-pointers.
//   - if False it means that the prior node is a block that is only
//     reachable from the root-set via a path including at least one
//     interior-pointer.
// - In the clique != -1 case, currently it's always True because we treat
//   start-pointers and interior-pointers the same for direct/indirect leak
//   checking.  If we added a PossibleIndirectLeak state then this would
//   change.
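//
// An illustrative example of the propagation (a made-up chain):
//
//    root word --start-ptr--> A --interior-ptr--> B --start-ptr--> C
//
// A is scanned with is_prior_definite == True and so becomes Reachable.
// The interior-pointer only makes B Possible;  when B is later popped and
// scanned, is_prior_definite is False, so C also only becomes Possible,
// even though C is reached via a start-pointer.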


// Define to debug the memory-leak-detector.
#define VG_DEBUG_LEAKCHECK 0
#define VG_DEBUG_CLIQUE    0


/*------------------------------------------------------------*/
/*--- Getting the initial chunks, and searching them.      ---*/
/*------------------------------------------------------------*/

// Compare the MC_Chunks by 'data' (i.e. the address of the block).
static Int compare_MC_Chunks(const void* n1, const void* n2)
{
   const MC_Chunk* mc1 = *(const MC_Chunk *const *)n1;
   const MC_Chunk* mc2 = *(const MC_Chunk *const *)n2;
   if (mc1->data < mc2->data) return -1;
   if (mc1->data > mc2->data) return  1;
   return 0;
}

#if VG_DEBUG_LEAKCHECK
// Used to sanity-check the fast binary-search mechanism.
static
Int find_chunk_for_OLD ( Addr       ptr,
                         MC_Chunk** chunks,
                         Int        n_chunks )

{
   Int  i;
   Addr a_lo, a_hi;
   PROF_EVENT(70, "find_chunk_for_OLD");
   for (i = 0; i < n_chunks; i++) {
      PROF_EVENT(71, "find_chunk_for_OLD(loop)");
      a_lo = chunks[i]->data;
      a_hi = ((Addr)chunks[i]->data) + chunks[i]->szB;
      if (a_lo <= ptr && ptr < a_hi)
         return i;
   }
   return -1;
}
#endif

// Find the i such that ptr points at or inside the block described by
// chunks[i].  Return -1 if none found.  This assumes that chunks[]
// has been sorted on the 'data' field.
static
Int find_chunk_for ( Addr       ptr,
                     MC_Chunk** chunks,
                     Int        n_chunks )
{
   Addr a_mid_lo, a_mid_hi;
   Int lo, mid, hi, retVal;
   // VG_(printf)("find chunk for %p = ", ptr);
   retVal = -1;
   lo = 0;
   hi = n_chunks-1;
   while (True) {
      // Invariant: current unsearched space is from lo to hi, inclusive.
      if (lo > hi) break; // not found

      mid      = (lo + hi) / 2;
      a_mid_lo = chunks[mid]->data;
      a_mid_hi = chunks[mid]->data + chunks[mid]->szB;
      // Extent of block 'mid' is [a_mid_lo .. a_mid_hi).
      // Special-case zero-sized blocks - treat them as if they had
      // size 1.  Not doing so causes them to not cover any address
      // range at all and so will never be identified as the target of
      // any pointer, which causes them to be incorrectly reported as
      // definitely leaked.
      if (chunks[mid]->szB == 0)
         a_mid_hi++;

      if (ptr < a_mid_lo) {
         hi = mid-1;
         continue;
      }
      if (ptr >= a_mid_hi) {
         lo = mid+1;
         continue;
      }
      tl_assert(ptr >= a_mid_lo && ptr < a_mid_hi);
      retVal = mid;
      break;
   }

#  if VG_DEBUG_LEAKCHECK
   tl_assert(retVal == find_chunk_for_OLD ( ptr, chunks, n_chunks ));
#  endif
   // VG_(printf)("%d\n", retVal);
   return retVal;
}
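
// For instance (illustrative numbers): with a block at [0x1000,0x1010)
// and a zero-sized block at 0x2000 in chunks[], find_chunk_for(0x1008,..)
// finds the first block, and find_chunk_for(0x2000,..) still finds the
// zero-sized block, because it is treated as if it covered
// [0x2000,0x2001).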


static MC_Chunk**
find_active_chunks(Int* pn_chunks)
{
   // Our goal is to construct a set of chunks that includes every
   // mempool chunk, and every malloc region that *doesn't* contain a
   // mempool chunk.
   MC_Mempool *mp;
   MC_Chunk **mallocs, **chunks, *mc;
   UInt n_mallocs, n_chunks, m, s;
   Bool *malloc_chunk_holds_a_pool_chunk;

   // First we collect all the malloc chunks into an array and sort it.
   // We do this because we want to query the chunks by interior
   // pointers, requiring binary search.
   mallocs = (MC_Chunk**) VG_(HT_to_array)( MC_(malloc_list), &n_mallocs );
   if (n_mallocs == 0) {
      tl_assert(mallocs == NULL);
      *pn_chunks = 0;
      return NULL;
   }
   VG_(ssort)(mallocs, n_mallocs, sizeof(VgHashNode*), compare_MC_Chunks);

   // Then we build an array containing a Bool for each malloc chunk,
   // indicating whether it contains any mempools.
   malloc_chunk_holds_a_pool_chunk = VG_(calloc)( "mc.fas.1",
                                                  n_mallocs, sizeof(Bool) );
   n_chunks = n_mallocs;

   // Then we loop over the mempool tables.  For each chunk in each
   // pool, we set the entry in the Bool array corresponding to the
   // malloc chunk containing the mempool chunk.
   VG_(HT_ResetIter)(MC_(mempool_list));
   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
      VG_(HT_ResetIter)(mp->chunks);
      while ( (mc = VG_(HT_Next)(mp->chunks)) ) {

         // We'll need to record this chunk.
         n_chunks++;

         // Possibly invalidate the malloc holding the beginning of this chunk.
         m = find_chunk_for(mc->data, mallocs, n_mallocs);
         if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
            tl_assert(n_chunks > 0);
            n_chunks--;
            malloc_chunk_holds_a_pool_chunk[m] = True;
         }

         // Possibly invalidate the malloc holding the end of this chunk.
         if (mc->szB > 1) {
            m = find_chunk_for(mc->data + (mc->szB - 1), mallocs, n_mallocs);
            if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
               tl_assert(n_chunks > 0);
               n_chunks--;
               malloc_chunk_holds_a_pool_chunk[m] = True;
            }
         }
      }
   }
   tl_assert(n_chunks > 0);

   // Create final chunk array.
   chunks = VG_(malloc)("mc.fas.2", sizeof(VgHashNode*) * (n_chunks));
   s = 0;

   // Copy the mempool chunks and the non-marked malloc chunks into a
   // combined array of chunks.
   VG_(HT_ResetIter)(MC_(mempool_list));
   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
      VG_(HT_ResetIter)(mp->chunks);
      while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
         tl_assert(s < n_chunks);
         chunks[s++] = mc;
      }
   }
   for (m = 0; m < n_mallocs; ++m) {
      if (!malloc_chunk_holds_a_pool_chunk[m]) {
         tl_assert(s < n_chunks);
         chunks[s++] = mallocs[m];
      }
   }
   tl_assert(s == n_chunks);

   // Free temporaries.
   VG_(free)(mallocs);
   VG_(free)(malloc_chunk_holds_a_pool_chunk);

   *pn_chunks = n_chunks;

   return chunks;
}

/*------------------------------------------------------------*/
/*--- The leak detector proper.                            ---*/
/*------------------------------------------------------------*/

// Holds extra info about each block during leak checking.
typedef
   struct {
      UInt state:2;    // Reachedness.
      UInt pending:1;  // Scan pending.
      UInt heuristic: (sizeof(UInt)*8)-3;
      // Heuristic with which this block was considered reachable.
      // LchNone if state != Reachable or no heuristic needed to
      // consider it reachable.

      union {
         SizeT indirect_szB;
         // If Unreached, how many bytes are unreachable from here.
         SizeT clique;
         // If IndirectLeak, clique leader to which it belongs.
      } IorC;
   }
   LC_Extra;
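
// (Illustrative note: the three bitfields above pack into a single UInt
// -- 2 + 1 + 29 bits with a 32-bit UInt -- so an LC_Extra needs only one
// UInt of flags plus the IorC union, plus any alignment padding.)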

// An array holding pointers to every chunk we're checking.  Sorted by address.
// lc_chunks is initialised during leak search.  It is kept after leak search
// to support printing the list of blocks belonging to a loss record.
// The lc_chunks array can only be used validly until the next "free"
// operation (as a free operation potentially destroys one or more chunks).
// To detect whether lc_chunks is valid, we store the number of free
// operations done when lc_chunks was built:  lc_chunks (and lc_extras)
// stay valid as long as no free operation has been done since they were
// built.
static MC_Chunk** lc_chunks;
// How many chunks we're dealing with.
static Int        lc_n_chunks;
static SizeT lc_chunks_n_frees_marker;
// This has the same number of entries as lc_chunks, and each entry
// in lc_chunks corresponds with the entry here (ie. lc_chunks[i] and
// lc_extras[i] describe the same block).
static LC_Extra* lc_extras;

// Chunks will be converted and merged into loss records, maintained in
// lr_table.
// lr_table elements are kept from one leak_search to another to implement
// the "print new/changed leaks" client request.
static OSet*        lr_table;
// Array of sorted loss records (produced during last leak search).
static LossRecord** lr_array;

// Value of the heuristics parameter used in the current (or last) leak check.
static UInt detect_memory_leaks_last_heuristics;

// DeltaMode used the last time we called detect_memory_leaks.
// The recorded leak errors are output using a logic based on this delta_mode.
// The below avoids replicating the delta_mode in each LossRecord.
LeakCheckDeltaMode MC_(detect_memory_leaks_last_delta_mode);


// Records chunks that are currently being processed.  Each element in the
// stack is an index into lc_chunks and lc_extras.  Its size is
// 'lc_n_chunks' because in the worst case that's how many chunks could be
// pushed onto it (actually I think the maximum is lc_n_chunks-1 but let's
// be conservative).
static Int* lc_markstack;
// The index of the top element of the stack; -1 if the stack is empty, 0 if
// the stack has one element, 1 if it has two, etc.
static Int  lc_markstack_top;

// Keeps track of how many bytes of memory we've scanned, for printing.
// (Nb: We don't keep track of how many register bytes we've scanned.)
static SizeT lc_scanned_szB;


SizeT MC_(bytes_leaked)     = 0;
SizeT MC_(bytes_indirect)   = 0;
SizeT MC_(bytes_dubious)    = 0;
SizeT MC_(bytes_reachable)  = 0;
SizeT MC_(bytes_suppressed) = 0;

SizeT MC_(blocks_leaked)     = 0;
SizeT MC_(blocks_indirect)   = 0;
SizeT MC_(blocks_dubious)    = 0;
SizeT MC_(blocks_reachable)  = 0;
SizeT MC_(blocks_suppressed) = 0;

// Subset of MC_(bytes_reachable) and MC_(blocks_reachable) which
// are considered reachable due to the corresponding heuristic.
static SizeT MC_(bytes_heuristically_reachable)[N_LEAK_CHECK_HEURISTICS]
                                               = {0,0,0,0};
static SizeT MC_(blocks_heuristically_reachable)[N_LEAK_CHECK_HEURISTICS]
                                                = {0,0,0,0};

// Determines if a pointer is to a chunk.  Returns the chunk number et al
// via call-by-reference.
static Bool
lc_is_a_chunk_ptr(Addr ptr, Int* pch_no, MC_Chunk** pch, LC_Extra** pex)
{
   Int ch_no;
   MC_Chunk* ch;
   LC_Extra* ex;

   // Quick filter.  Note: implemented with am, not with get_vabits2
   // as ptr might be random data pointing anywhere.  On 64 bit
   // platforms, getting va bits for random data can be quite costly
   // due to the secondary map.
   if (!VG_(am_is_valid_for_client)(ptr, 1, VKI_PROT_READ)) {
      return False;
   } else {
      ch_no = find_chunk_for(ptr, lc_chunks, lc_n_chunks);
      tl_assert(ch_no >= -1 && ch_no < lc_n_chunks);

      if (ch_no == -1) {
         return False;
      } else {
         // Ok, we've found a pointer to a chunk.  Get the MC_Chunk and its
         // LC_Extra.
         ch = lc_chunks[ch_no];
         ex = &(lc_extras[ch_no]);

         tl_assert(ptr >= ch->data);
         tl_assert(ptr < ch->data + ch->szB + (ch->szB==0 ? 1 : 0));

         if (VG_DEBUG_LEAKCHECK)
            VG_(printf)("ptr=%#lx -> block %d\n", ptr, ch_no);

         *pch_no = ch_no;
         *pch    = ch;
         *pex    = ex;

         return True;
      }
   }
}

// Push a chunk (well, just its index) onto the mark stack.
static void lc_push(Int ch_no, MC_Chunk* ch)
{
   if (!lc_extras[ch_no].pending) {
      if (0) {
         VG_(printf)("pushing %#lx-%#lx\n", ch->data, ch->data + ch->szB);
      }
      lc_markstack_top++;
      tl_assert(lc_markstack_top < lc_n_chunks);
      lc_markstack[lc_markstack_top] = ch_no;
      tl_assert(!lc_extras[ch_no].pending);
      lc_extras[ch_no].pending = True;
   }
}

// Pop the index of the chunk on the top of the mark stack into *ret and
// return True;  return False if the stack is empty.
static Bool lc_pop(Int* ret)
{
   if (-1 == lc_markstack_top) {
      return False;
   } else {
      tl_assert(0 <= lc_markstack_top && lc_markstack_top < lc_n_chunks);
      *ret = lc_markstack[lc_markstack_top];
      lc_markstack_top--;
      tl_assert(lc_extras[*ret].pending);
      lc_extras[*ret].pending = False;
      return True;
   }
}

586{
587 switch(h) {
588 case LchNone: return "none";
589 case LchStdString: return "stdstring";
590 case LchNewArray: return "newarray";
591 case LchMultipleInheritance: return "multipleinheritance";
592 default: return "???invalid heuristic???";
593 }
594}
595
596// True if ptr looks like the address of a vtable, i.e. if ptr
597// points to an array of pointers to functions.
598// It is assumed the only caller of this function is heuristic_reachedness
599// which must check that ptr is aligned and above page 0.
600// Checking that ptr is above page 0 is an optimisation : it is assumed
601// that no vtable is located in the page 0. So, all small integer values
602// encountered during the scan will not incur the cost of calling this
603// function.
604static Bool aligned_ptr_above_page0_is_vtable_addr(Addr ptr)
605{
606 // ??? If performance problem:
607 // ??? maybe implement a cache (array indexed by ptr % primenr)
608 // ??? of "I am a vtable ptr" ???
609
610 // ??? Maybe the debug info could (efficiently?) be used to detect vtables ?
611
612 // We consider ptr as a vtable ptr if it points to a table
613 // where we find only NULL pointers or pointers pointing at an
614 // executable region. We must find at least 2 non NULL pointers
615 // before considering ptr as a vtable pointer.
616 // We scan a maximum of VTABLE_MAX_CHECK words for these 2 non NULL
617 // pointers.
618#define VTABLE_MAX_CHECK 20
619
620 NSegment const *seg;
621 UInt nr_fn_ptrs = 0;
622 Addr scan;
623 Addr scan_max;
624
625 // First verify ptr points inside a client mapped file section.
626 // ??? is a vtable always in a file mapped readable section ?
627 seg = VG_(am_find_nsegment) (ptr);
628 if (seg == NULL
629 || seg->kind != SkFileC
630 || !seg->hasR)
631 return False;
632
633 // Check potential function pointers, up to a maximum of VTABLE_MAX_CHECK.
634 scan_max = ptr + VTABLE_MAX_CHECK*sizeof(Addr);
635 // If ptr is near the end of seg, avoid scan_max exceeding the end of seg:
636 if (scan_max > seg->end - sizeof(Addr))
637 scan_max = seg->end - sizeof(Addr);
638 for (scan = ptr; scan <= scan_max; scan+=sizeof(Addr)) {
639 Addr pot_fn = *((Addr *)scan);
640 if (pot_fn == 0)
641 continue; // NULL fn pointer. Seems it can happen in vtable.
642 seg = VG_(am_find_nsegment) (pot_fn);
643#if defined(VGA_ppc64)
644 // ppc64 use a thunk table. So, we have one more level of indirection
645 // to follow.
646 if (seg == NULL
647 || seg->kind != SkFileC
648 || !seg->hasR
649 || !seg->hasW)
650 return False; // ptr to nowhere, or not a ptr to thunks.
651 pot_fn = *((Addr *)pot_fn);
652 if (pot_fn == 0)
653 continue; // NULL fn pointer. Seems it can happen in vtable.
654 seg = VG_(am_find_nsegment) (pot_fn);
655#endif
656 if (seg == NULL
657 || seg->kind != SkFileC
658 || !seg->hasT)
659 return False; // ptr to nowhere, or not a fn ptr.
660 nr_fn_ptrs++;
661 if (nr_fn_ptrs == 2)
662 return True;
663 }
664
665 return False;
666}
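
// An illustrative sketch of the layout the above check is aimed at
// (made-up example;  details vary with compiler and ABI):
//
//    class A { virtual void f(); virtual void g(); long x; };
//
// An A object starts with a vtable pointer, which points into a
// read-only, file-mapped table whose entries are code addresses:
//
//    object:  [ vptr ][ x ]
//    vtable:   ... [ &A::f ][ &A::g ] ...
//
// So a word is accepted as a vtable address once at least two of the
// VTABLE_MAX_CHECK words it points at look like function pointers, i.e.
// point into a client-mapped executable file section.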
667
// If ch is heuristically reachable via a heuristic member of heur_set,
// returns this heuristic.
// If ch cannot be considered reachable using one of these heuristics,
// returns LchNone.
// This should only be called when ptr is an interior ptr to ch.
// The StdString/NewArray/MultipleInheritance heuristics are directly
// inspired by DrMemory:
// see http://www.burningcutlery.com/derek/docs/drmem-CGO11.pdf [section VI,C]
// and bug 280271.
static LeakCheckHeuristic heuristic_reachedness (Addr ptr,
                                                 MC_Chunk *ch, LC_Extra *ex,
                                                 UInt heur_set)
{
   if (HiS(LchStdString, heur_set)) {
      // Detects inner pointers to std::string for the layout
      //     length capacity refcount char_array[] \0
      // where ptr points to the beginning of the char_array.
      if ( ptr == ch->data + 3 * sizeof(SizeT)) {
         const SizeT length = *((SizeT*)ch->data);
         const SizeT capacity = *((SizeT*)ch->data+1);
         if (length <= capacity
             && (3 * sizeof(SizeT) + capacity + 1 == ch->szB)) {
            // ??? could check there is no null byte from ptr to ptr+length-1
            // ??? and that there is a null byte at ptr+length.
            // ???
            // ??? could check that ch->allockind is MC_AllocNew ???
            // ??? probably not a good idea, as I guess stdstring
            // ??? allocator can be done via custom allocator
            // ??? or even a call to malloc ????
            return LchStdString;
         }
      }
   }

   if (HiS(LchNewArray, heur_set)) {
      // Detects inner pointers at the second word of a new[] array,
      // following a plausible number of elements.
      // Such inner pointers are used for arrays of elements
      // having a destructor, as the delete[] of the array must know
      // how many elements to destroy.
      //
      // We have a strange/wrong case for 'ptr = new MyClass[0];' :
      // For such a case, the returned ptr points just outside the
      // allocated chunk.  This chunk is then seen as a definite
      // leak by Valgrind, as it is not considered an interior pointer.
      // It is the C++ equivalent of bug 99923 (malloc(0) wrongly considered
      // as definitely leaked).  See the trick in find_chunk_for handling
      // 0-sized blocks.  This trick does not work for 'new MyClass[0]'
      // because a word-sized chunk is allocated to store the (0) number
      // of elements.
      if ( ptr == ch->data + sizeof(SizeT)) {
         const SizeT nr_elts = *((SizeT*)ch->data);
         if (nr_elts > 0 && (ch->szB - sizeof(SizeT)) % nr_elts == 0) {
            // ??? could check that ch->allockind is MC_AllocNewVec ???
            return LchNewArray;
         }
      }
   }

   if (HiS(LchMultipleInheritance, heur_set)) {
      // Detects inner pointers used for multiple inheritance.
      // The assumption is that the vtable pointers are before the object.
      if (VG_IS_WORD_ALIGNED(ptr)) {
         Addr first_addr;
         Addr inner_addr;

         // Avoid the call to is_vtable_addr when the addr is not
         // aligned or points in page 0, as it is unlikely
         // a vtable is located in this page.  This last optimisation
         // avoids calling aligned_ptr_above_page0_is_vtable_addr
         // for all small integers.
         // Note: we could possibly also avoid calling this function
         // for small negative integers, as no vtable should be located
         // in the last page.
         inner_addr = *((Addr*)ptr);
         if (VG_IS_WORD_ALIGNED(inner_addr)
             && inner_addr >= (Addr)VKI_PAGE_SIZE) {
            first_addr = *((Addr*)ch->data);
            if (VG_IS_WORD_ALIGNED(first_addr)
                && first_addr >= (Addr)VKI_PAGE_SIZE
                && aligned_ptr_above_page0_is_vtable_addr(inner_addr)
                && aligned_ptr_above_page0_is_vtable_addr(first_addr)) {
               // ??? could check that ch->allockind is MC_AllocNew ???
               return LchMultipleInheritance;
            }
         }
      }
   }

   return LchNone;
}
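
// Illustrative sketches of the layouts matched by the heuristics above
// (made-up examples;  the exact layouts depend on the C++ library/ABI):
//
//    std::string (ref-counted implementation):
//       [ length ][ capacity ][ refcount ][ chars ... \0 ]
//                                          ^ interior ptr seen in scan
//
//    new MyClass[n], where MyClass has a destructor:
//       [ n ][ element 0 ][ element 1 ] ...
//             ^ interior ptr returned by new[]
//
//    multiple inheritance:
//       [ vptr ][ base1 fields ][ vptr ][ base2 fields ]
//                                ^ interior ptr to an embedded subobject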


// If 'ptr' is pointing to a heap-allocated block which hasn't been seen
// before, push it onto the mark stack.
static void
lc_push_without_clique_if_a_chunk_ptr(Addr ptr, Bool is_prior_definite)
{
   Int ch_no;
   MC_Chunk* ch;
   LC_Extra* ex;
   Reachedness ch_via_ptr; // Is ch reachable via ptr, and how?

   if ( ! lc_is_a_chunk_ptr(ptr, &ch_no, &ch, &ex) )
      return;

   if (ex->state == Reachable) {
      // If the block was considered reachable via a heuristic, and it
      // is now directly reachable via ptr, clear the heuristic.
      if (ex->heuristic && ptr == ch->data) {
         // ch was up to now considered reachable due to ex->heuristic.
         // We have a direct ptr now => clear the heuristic field.
         ex->heuristic = LchNone;
      }
      return;
   }

   // Possibly upgrade the state, ie. one of:
   // - Unreached --> Possible
   // - Unreached --> Reachable
   // - Possible  --> Reachable

   if (ptr == ch->data)
      ch_via_ptr = Reachable;
   else if (detect_memory_leaks_last_heuristics) {
      ex->heuristic
         = heuristic_reachedness (ptr, ch, ex,
                                  detect_memory_leaks_last_heuristics);
      if (ex->heuristic)
         ch_via_ptr = Reachable;
      else
         ch_via_ptr = Possible;
   } else
      ch_via_ptr = Possible;

   if (ch_via_ptr == Reachable && is_prior_definite) {
      // 'ptr' points to the start of the block or is to be considered as
      // pointing to the start of the block, and the prior node is
      // definite, which means that this block is definitely reachable.
      ex->state = Reachable;

      // State has changed to Reachable so (re)scan the block to make
      // sure any blocks it points to are correctly marked.
      lc_push(ch_no, ch);

   } else if (ex->state == Unreached) {
      // Either 'ptr' is an interior-pointer, or the prior node isn't
      // definite, which means that we can only mark this block as possibly
      // reachable.
      ex->state = Possible;

      // State has changed to Possible so (re)scan the block to make
      // sure any blocks it points to are correctly marked.
      lc_push(ch_no, ch);
   }
}

static void
lc_push_if_a_chunk_ptr_register(ThreadId tid, const HChar* regname, Addr ptr)
{
   lc_push_without_clique_if_a_chunk_ptr(ptr, /*is_prior_definite*/True);
}

// If ptr is pointing to a heap-allocated block which hasn't been seen
// before, push it onto the mark stack.  Clique is the index of the
// clique leader.
static void
lc_push_with_clique_if_a_chunk_ptr(Addr ptr, Int clique, Int cur_clique)
{
   Int ch_no;
   MC_Chunk* ch;
   LC_Extra* ex;

   tl_assert(0 <= clique && clique < lc_n_chunks);

   if ( ! lc_is_a_chunk_ptr(ptr, &ch_no, &ch, &ex) )
      return;

   // If it's not Unreached, it's already been handled so ignore it.
   // If ch_no==clique, it's the clique leader, which means this is a cyclic
   // structure;  again ignore it because it's already been handled.
   if (ex->state == Unreached && ch_no != clique) {
      // Note that, unlike reachable blocks, we currently don't distinguish
      // between start-pointers and interior-pointers here.  We probably
      // should, though.
      lc_push(ch_no, ch);

      // Add the block to the clique, and add its size to the
      // clique-leader's indirect size.  Also, if the new block was
      // itself a clique leader, it isn't any more, so add its
      // indirect_szB to the new clique leader.
      if (VG_DEBUG_CLIQUE) {
         if (ex->IorC.indirect_szB > 0)
            VG_(printf)("  clique %d joining clique %d adding %lu+%lu\n",
                        ch_no, clique, (unsigned long)ch->szB,
                        (unsigned long)ex->IorC.indirect_szB);
         else
            VG_(printf)("  block %d joining clique %d adding %lu\n",
                        ch_no, clique, (unsigned long)ch->szB);
      }

      lc_extras[clique].IorC.indirect_szB += ch->szB;
      lc_extras[clique].IorC.indirect_szB += ex->IorC.indirect_szB;
      ex->state = IndirectLeak;
      ex->IorC.clique = (SizeT) cur_clique;
   }
}
877static void
philippeab1fce92013-09-29 13:47:32 +0000878lc_push_if_a_chunk_ptr(Addr ptr,
879 Int clique, Int cur_clique, Bool is_prior_definite)
njn8225cc02009-03-09 22:52:24 +0000880{
881 if (-1 == clique)
882 lc_push_without_clique_if_a_chunk_ptr(ptr, is_prior_definite);
883 else
philippea22f59d2012-01-26 23:13:52 +0000884 lc_push_with_clique_if_a_chunk_ptr(ptr, clique, cur_clique);
sewardjb5f6f512005-03-10 23:59:00 +0000885}
886
sewardj45d94cc2005-04-20 14:44:11 +0000887
sewardj97d3ebb2011-04-11 18:36:34 +0000888static VG_MINIMAL_JMP_BUF(memscan_jmpbuf);
sewardjb5f6f512005-03-10 23:59:00 +0000889
njn8225cc02009-03-09 22:52:24 +0000890static
891void scan_all_valid_memory_catcher ( Int sigNo, Addr addr )
sewardjb5f6f512005-03-10 23:59:00 +0000892{
njn8225cc02009-03-09 22:52:24 +0000893 if (0)
894 VG_(printf)("OUCH! sig=%d addr=%#lx\n", sigNo, addr);
895 if (sigNo == VKI_SIGSEGV || sigNo == VKI_SIGBUS)
sewardj6c591e12011-04-11 16:17:51 +0000896 VG_MINIMAL_LONGJMP(memscan_jmpbuf);
njn8225cc02009-03-09 22:52:24 +0000897}
898
philippea22f59d2012-01-26 23:13:52 +0000899// lc_scan_memory has 2 modes:
900//
901// 1. Leak check mode (searched == 0).
902// -----------------------------------
njn8225cc02009-03-09 22:52:24 +0000903// Scan a block of memory between [start, start+len). This range may
904// be bogus, inaccessable, or otherwise strange; we deal with it. For each
905// valid aligned word we assume it's a pointer to a chunk a push the chunk
906// onto the mark stack if so.
philippea22f59d2012-01-26 23:13:52 +0000907// clique is the "highest level clique" in which indirectly leaked blocks have
908// to be collected. cur_clique is the current "lower" level clique through which
909// the memory to be scanned has been found.
910// Example: in the below tree if A is leaked, the top level clique will
911// be A, while lower level cliques will be B and C.
912/*
913 A
914 / \
915 B C
916 / \ / \
917 D E F G
918*/
919// Proper handling of top and lowest level clique allows block_list of a loss
920// record to describe the hierarchy of indirectly leaked blocks.
921//
922// 2. Search ptr mode (searched != 0).
923// -----------------------------------
924// In this mode, searches for pointers to a specific address range
philippeab1fce92013-09-29 13:47:32 +0000925// In such a case, lc_scan_memory just scans [start..start+len[ for pointers
926// to searched and outputs the places where searched is found.
927// It does not recursively scans the found memory.
njn8225cc02009-03-09 22:52:24 +0000928static void
philippeab1fce92013-09-29 13:47:32 +0000929lc_scan_memory(Addr start, SizeT len, Bool is_prior_definite,
930 Int clique, Int cur_clique,
philippea22f59d2012-01-26 23:13:52 +0000931 Addr searched, SizeT szB)
njn8225cc02009-03-09 22:52:24 +0000932{
philippe57a16a22012-07-18 22:26:51 +0000933 /* memory scan is based on the assumption that valid pointers are aligned
934 on a multiple of sizeof(Addr). So, we can (and must) skip the begin and
935 end portions of the block if they are not aligned on sizeof(Addr):
936 These cannot be a valid pointer, and calls to MC_(is_valid_aligned_word)
937 will assert for a non aligned address. */
njn8225cc02009-03-09 22:52:24 +0000938 Addr ptr = VG_ROUNDUP(start, sizeof(Addr));
njn13bfd852005-06-02 03:52:53 +0000939 Addr end = VG_ROUNDDN(start+len, sizeof(Addr));
sewardjb5f6f512005-03-10 23:59:00 +0000940 vki_sigset_t sigmask;
941
942 if (VG_DEBUG_LEAKCHECK)
njn8225cc02009-03-09 22:52:24 +0000943 VG_(printf)("scan %#lx-%#lx (%lu)\n", start, end, len);
944
sewardjb5f6f512005-03-10 23:59:00 +0000945 VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &sigmask);
njn695c16e2005-03-27 03:40:28 +0000946 VG_(set_fault_catcher)(scan_all_valid_memory_catcher);
sewardjb5f6f512005-03-10 23:59:00 +0000947
philippe57a16a22012-07-18 22:26:51 +0000948 /* Optimisation: the loop below will check for each begin
949 of SM chunk if the chunk is fully unaddressable. The idea is to
950 skip efficiently such fully unaddressable SM chunks.
951 So, we preferrably start the loop on a chunk boundary.
952 If the chunk is not fully unaddressable, we might be in
953 an unaddressable page. Again, the idea is to skip efficiently
954 such unaddressable page : this is the "else" part.
955 We use an "else" so that two consecutive fully unaddressable
956 SM chunks will be skipped efficiently: first one is skipped
957 by this piece of code. The next SM chunk will be skipped inside
958 the loop. */
959 if ( ! MC_(is_within_valid_secondary)(ptr) ) {
960 // Skip an invalid SM chunk till the beginning of the next SM Chunk.
961 ptr = VG_ROUNDUP(ptr+1, SM_SIZE);
962 } else if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ)) {
963 // else we are in a (at least partially) valid SM chunk.
964 // We might be in the middle of an unreadable page.
965 // Do a cheap check to see if it's valid;
966 // if not, skip onto the next page.
njn8225cc02009-03-09 22:52:24 +0000967 ptr = VG_PGROUNDUP(ptr+1); // First page is bad - skip it.
philippe57a16a22012-07-18 22:26:51 +0000968 }
969 /* This optimisation and below loop is based on some relationships between
970 VKI_PAGE_SIZE, SM_SIZE and sizeof(Addr) which are asserted in
971 MC_(detect_memory_leaks). */
sewardjb5f6f512005-03-10 23:59:00 +0000972
sewardj05fe85e2005-04-27 22:46:36 +0000973 while (ptr < end) {
sewardjb5f6f512005-03-10 23:59:00 +0000974 Addr addr;
975
njn8225cc02009-03-09 22:52:24 +0000976 // Skip invalid chunks.
philippe57a16a22012-07-18 22:26:51 +0000977 if (UNLIKELY((ptr % SM_SIZE) == 0)) {
978 if (! MC_(is_within_valid_secondary)(ptr) ) {
979 ptr = VG_ROUNDUP(ptr+1, SM_SIZE);
980 continue;
981 }
sewardjb5f6f512005-03-10 23:59:00 +0000982 }
983
njn8225cc02009-03-09 22:52:24 +0000984 // Look to see if this page seems reasonable.
philippe57a16a22012-07-18 22:26:51 +0000985 if (UNLIKELY((ptr % VKI_PAGE_SIZE) == 0)) {
njn8225cc02009-03-09 22:52:24 +0000986 if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ)) {
987 ptr += VKI_PAGE_SIZE; // Bad page - skip it.
988 continue;
989 }
philippe57a16a22012-07-18 22:26:51 +0000990 // aspacemgr indicates the page is readable and belongs to client.
991 // We still probe the page explicitely in case aspacemgr is
992 // desynchronised with the real page mappings.
993 // Such a desynchronisation can happen due to an aspacemgr bug.
994 // Note that if the application is using mprotect(NONE), then
995 // a page can be unreadable but have addressable and defined
996 // VA bits (see mc_main.c function mc_new_mem_mprotect).
997 if (VG_MINIMAL_SETJMP(memscan_jmpbuf) == 0) {
998 // Try a read in the beginning of the page ...
999 Addr test = *(volatile Addr *)ptr;
1000 __asm__ __volatile__("": :"r"(test) : "cc","memory");
1001 } else {
1002 // Catch read error ...
1003 // We need to restore the signal mask, because we were
1004 // longjmped out of a signal handler.
1005 VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
1006 ptr += VKI_PAGE_SIZE; // Bad page - skip it.
1007 continue;
1008 }
sewardjb5f6f512005-03-10 23:59:00 +00001009 }
1010
philippe57a16a22012-07-18 22:26:51 +00001011 if ( MC_(is_valid_aligned_word)(ptr) ) {
1012 lc_scanned_szB += sizeof(Addr);
1013 addr = *(Addr *)ptr;
1014 // If we get here, the scanned word is in valid memory. Now
1015 // let's see if its contents point to a chunk.
1016 if (UNLIKELY(searched)) {
1017 if (addr >= searched && addr < searched + szB) {
philippeab1fce92013-09-29 13:47:32 +00001018 if (addr == searched) {
philippe57a16a22012-07-18 22:26:51 +00001019 VG_(umsg)("*%#lx points at %#lx\n", ptr, searched);
philippeab1fce92013-09-29 13:47:32 +00001020 MC_(pp_describe_addr) (ptr);
1021 } else {
1022 Int ch_no;
1023 MC_Chunk *ch;
1024 LC_Extra *ex;
philippe57a16a22012-07-18 22:26:51 +00001025 VG_(umsg)("*%#lx interior points at %lu bytes inside %#lx\n",
1026 ptr, (long unsigned) addr - searched, searched);
philippeab1fce92013-09-29 13:47:32 +00001027 MC_(pp_describe_addr) (ptr);
1028 if (lc_is_a_chunk_ptr(addr, &ch_no, &ch, &ex) ) {
1029 Int h;
1030 for (h = LchStdString; h <= LchMultipleInheritance; h++) {
1031 if (heuristic_reachedness(addr, ch, ex, H2S(h)) == h) {
1032 VG_(umsg)("block at %#lx considered reachable "
1033 "by ptr %#lx using %s heuristic\n",
1034 ch->data, addr, pp_heuristic(h));
1035 }
1036 }
philippe5bd40602013-10-02 20:59:05 +00001037 // Verify the loop above has properly scanned all heuristics.
1038 // If the below fails, it probably means the LeakCheckHeuristic
1039 // enum is not in sync anymore with the above loop and/or
1040 // with N_LEAK_CHECK_HEURISTICS.
1041 tl_assert (h == N_LEAK_CHECK_HEURISTICS);
philippeab1fce92013-09-29 13:47:32 +00001042 }
1043 }
philippea22f59d2012-01-26 23:13:52 +00001044 }
philippe57a16a22012-07-18 22:26:51 +00001045 } else {
1046 lc_push_if_a_chunk_ptr(addr, clique, cur_clique, is_prior_definite);
njn8225cc02009-03-09 22:52:24 +00001047 }
philippe57a16a22012-07-18 22:26:51 +00001048 } else if (0 && VG_DEBUG_LEAKCHECK) {
1049 VG_(printf)("%#lx not valid\n", ptr);
sewardjb5f6f512005-03-10 23:59:00 +00001050 }
philippe57a16a22012-07-18 22:26:51 +00001051 ptr += sizeof(Addr);
sewardjb5f6f512005-03-10 23:59:00 +00001052 }
1053
1054 VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
1055 VG_(set_fault_catcher)(NULL);
1056}


// Process the mark stack until empty.
static void lc_process_markstack(Int clique)
{
   Int  top = -1;    // shut gcc up
   Bool is_prior_definite;

   while (lc_pop(&top)) {
      tl_assert(top >= 0 && top < lc_n_chunks);

      // See comment about 'is_prior_definite' at the top to understand this.
      is_prior_definite = ( Possible != lc_extras[top].state );

      lc_scan_memory(lc_chunks[top]->data, lc_chunks[top]->szB,
                     is_prior_definite, clique, (clique == -1 ? -1 : top),
                     /*searched*/ 0, 0);
   }
}

static Word cmp_LossRecordKey_LossRecord(const void* key, const void* elem)
{
   const LossRecordKey* a = key;
   const LossRecordKey* b = &(((const LossRecord*)elem)->key);

   // Compare on states first because that's fast.
   if (a->state < b->state) return -1;
   if (a->state > b->state) return  1;
   // Ok, the states are equal.  Now compare the locations, which is slower.
   if (VG_(eq_ExeContext)(
            MC_(clo_leak_resolution), a->allocated_at, b->allocated_at))
      return 0;
   // Different locations.  Ordering is arbitrary, just use the ec pointer.
   if (a->allocated_at < b->allocated_at) return -1;
   if (a->allocated_at > b->allocated_at) return  1;
   VG_(tool_panic)("bad LossRecord comparison");
}

static Int cmp_LossRecords(const void* va, const void* vb)
{
   const LossRecord* lr_a = *(const LossRecord *const *)va;
   const LossRecord* lr_b = *(const LossRecord *const *)vb;
   SizeT total_szB_a = lr_a->szB + lr_a->indirect_szB;
   SizeT total_szB_b = lr_b->szB + lr_b->indirect_szB;

   // First compare by sizes.
   if (total_szB_a < total_szB_b) return -1;
   if (total_szB_a > total_szB_b) return  1;
   // If sizes are equal, compare by states.
   if (lr_a->key.state < lr_b->key.state) return -1;
   if (lr_a->key.state > lr_b->key.state) return  1;
   // If they're still equal here, it doesn't matter that much, but we keep
   // comparing other things so that regtests are as deterministic as
   // possible.  So: compare num_blocks.
   if (lr_a->num_blocks < lr_b->num_blocks) return -1;
   if (lr_a->num_blocks > lr_b->num_blocks) return  1;
   // Finally, compare ExeContext addresses... older ones are likely to have
   // lower addresses.
   if (lr_a->key.allocated_at < lr_b->key.allocated_at) return -1;
   if (lr_a->key.allocated_at > lr_b->key.allocated_at) return  1;
   return 0;
}

// Allocates or reallocates lr_array, and sets its elements to the loss
// records contained in lr_table.
static Int get_lr_array_from_lr_table(void) {
   Int i, n_lossrecords;
   LossRecord* lr;

   n_lossrecords = VG_(OSetGen_Size)(lr_table);

   // (re-)create the array of pointers to the loss records.
   // lr_array is kept to allow producing the block list from gdbserver.
   if (lr_array != NULL)
      VG_(free)(lr_array);
   lr_array = VG_(malloc)("mc.pr.2", n_lossrecords * sizeof(LossRecord*));
   i = 0;
   VG_(OSetGen_ResetIter)(lr_table);
   while ( (lr = VG_(OSetGen_Next)(lr_table)) ) {
      lr_array[i++] = lr;
   }
   tl_assert(i == n_lossrecords);
   return n_lossrecords;
}


static void get_printing_rules(LeakCheckParams* lcp,
                               LossRecord*  lr,
                               Bool* count_as_error,
                               Bool* print_record)
{
   // Rules for printing:
   // - We don't show suppressed loss records ever (and that's controlled
   //   within the error manager).
   // - We show non-suppressed loss records that are specified in
   //   --show-leak-kinds=... if --leak-check=yes.

   Bool delta_considered;

   switch (lcp->deltamode) {
   case LCD_Any:
      delta_considered = lr->num_blocks > 0;
      break;
   case LCD_Increased:
      delta_considered
         = lr->szB > lr->old_szB
         || lr->indirect_szB > lr->old_indirect_szB
         || lr->num_blocks > lr->old_num_blocks;
      break;
   case LCD_Changed:
      delta_considered = lr->szB != lr->old_szB
         || lr->indirect_szB != lr->old_indirect_szB
         || lr->num_blocks != lr->old_num_blocks;
      break;
   default:
      tl_assert(0);
   }

   *print_record = lcp->mode == LC_Full && delta_considered
      && RiS(lr->key.state,lcp->show_leak_kinds);
   // We don't count leaks as errors with lcp->mode==LC_Summary, as that
   // can give high error counts with few or no error messages, which can
   // be confusing.  Otherwise, we count as errors the leak kinds requested
   // by --errors-for-leak-kinds=...
   *count_as_error = lcp->mode == LC_Full && delta_considered
      && RiS(lr->key.state,lcp->errors_for_leak_kinds);
}
1184
1185static void print_results(ThreadId tid, LeakCheckParams* lcp)
1186{
1187 Int i, n_lossrecords, start_lr_output_scan;
njn29a5c012009-05-06 06:15:55 +00001188 LossRecord* lr;
1189 Bool is_suppressed;
philippeab1fce92013-09-29 13:47:32 +00001190 /* old_* variables are used to report delta in summary. */
1191 SizeT old_bytes_leaked = MC_(bytes_leaked);
sewardjc8bd1df2011-06-26 12:41:33 +00001192 SizeT old_bytes_indirect = MC_(bytes_indirect);
1193 SizeT old_bytes_dubious = MC_(bytes_dubious);
1194 SizeT old_bytes_reachable = MC_(bytes_reachable);
1195 SizeT old_bytes_suppressed = MC_(bytes_suppressed);
1196 SizeT old_blocks_leaked = MC_(blocks_leaked);
1197 SizeT old_blocks_indirect = MC_(blocks_indirect);
1198 SizeT old_blocks_dubious = MC_(blocks_dubious);
1199 SizeT old_blocks_reachable = MC_(blocks_reachable);
1200 SizeT old_blocks_suppressed = MC_(blocks_suppressed);
sewardjb5f6f512005-03-10 23:59:00 +00001201
philippeab1fce92013-09-29 13:47:32 +00001202 SizeT old_bytes_heuristically_reachable[N_LEAK_CHECK_HEURISTICS];
1203 SizeT old_blocks_heuristically_reachable[N_LEAK_CHECK_HEURISTICS];
1204
1205 for (i = 0; i < N_LEAK_CHECK_HEURISTICS; i++) {
1206 old_bytes_heuristically_reachable[i]
1207 = MC_(bytes_heuristically_reachable)[i];
1208 MC_(bytes_heuristically_reachable)[i] = 0;
1209 old_blocks_heuristically_reachable[i]
1210 = MC_(blocks_heuristically_reachable)[i];
1211 MC_(blocks_heuristically_reachable)[i] = 0;
1212 }
1213
sewardjc8bd1df2011-06-26 12:41:33 +00001214 if (lr_table == NULL)
1215 // Create the lr_table, which holds the loss records.
1216 // If the lr_table already exists, it means it contains
1217 // loss_records from the previous leak search. The old_*
1218 // values in these records are used to implement the
1219 // leak check delta mode
1220 lr_table =
1221 VG_(OSetGen_Create)(offsetof(LossRecord, key),
1222 cmp_LossRecordKey_LossRecord,
1223 VG_(malloc), "mc.pr.1",
1224 VG_(free));
1225
philippea22f59d2012-01-26 23:13:52 +00001226 // If we have loss records from a previous search, reset values to have
1227 // proper printing of the deltas between previous search and this search.
1228 n_lossrecords = get_lr_array_from_lr_table();
1229 for (i = 0; i < n_lossrecords; i++) {
philippe4bbfc5f2012-02-27 21:52:45 +00001230 if (lr_array[i]->num_blocks == 0) {
philippea22f59d2012-01-26 23:13:52 +00001231 // remove from lr_table the old loss_records with 0 bytes found
1232 VG_(OSetGen_Remove) (lr_table, &lr_array[i]->key);
philippe4bbfc5f2012-02-27 21:52:45 +00001233 VG_(OSetGen_FreeNode)(lr_table, lr_array[i]);
1234 } else {
philippea22f59d2012-01-26 23:13:52 +00001235 // move the leak sizes to old_* and zero the current sizes
1236 // for next leak search
1237 lr_array[i]->old_szB = lr_array[i]->szB;
1238 lr_array[i]->old_indirect_szB = lr_array[i]->indirect_szB;
1239 lr_array[i]->old_num_blocks = lr_array[i]->num_blocks;
1240 lr_array[i]->szB = 0;
1241 lr_array[i]->indirect_szB = 0;
1242 lr_array[i]->num_blocks = 0;
1243 }
1244 }
1245 // lr_array now contains "invalid" loss records => free it.
1246 // lr_array will be re-created below with the kept and new loss records.
1247 VG_(free) (lr_array);
1248 lr_array = NULL;

   // Convert the chunks into loss records, merging them where appropriate.
   for (i = 0; i < lc_n_chunks; i++) {
      MC_Chunk*     ch = lc_chunks[i];
      LC_Extra*     ex = &(lc_extras)[i];
      LossRecord*   old_lr;
      LossRecordKey lrkey;
      lrkey.state        = ex->state;
      lrkey.allocated_at = MC_(allocated_at)(ch);

      if (ex->heuristic) {
         MC_(bytes_heuristically_reachable)[ex->heuristic] += ch->szB;
         MC_(blocks_heuristically_reachable)[ex->heuristic]++;
         if (VG_DEBUG_LEAKCHECK)
            VG_(printf)("heuristic %s %#lx len %lu\n",
                        pp_heuristic(ex->heuristic),
                        ch->data, (unsigned long)ch->szB);
      }

      old_lr = VG_(OSetGen_Lookup)(lr_table, &lrkey);
      if (old_lr) {
         // We found an existing loss record matching this chunk.  Update the
         // loss record's details in-situ.  This is safe because we don't
         // change the elements used as the OSet key.
         old_lr->szB += ch->szB;
         if (ex->state == Unreached)
            old_lr->indirect_szB += ex->IorC.indirect_szB;
         old_lr->num_blocks++;
      } else {
         // No existing loss record matches this chunk.  Create a new loss
         // record, initialise it from the chunk, and insert it into lr_table.
         lr = VG_(OSetGen_AllocNode)(lr_table, sizeof(LossRecord));
         lr->key              = lrkey;
         lr->szB              = ch->szB;
         if (ex->state == Unreached)
            lr->indirect_szB  = ex->IorC.indirect_szB;
         else
            lr->indirect_szB  = 0;
         lr->num_blocks       = 1;
         lr->old_szB          = 0;
         lr->old_indirect_szB = 0;
         lr->old_num_blocks   = 0;
         VG_(OSetGen_Insert)(lr_table, lr);
      }
   }
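
   // A loss record is keyed by (state, allocation stack): two chunks in
   // the same state that were allocated at the same place land in the
   // same record.  For example (hypothetical), two Unreached blocks of
   // 10 and 20 bytes both allocated at main -> foo -> malloc would be
   // reported as a single record of "30 bytes in 2 blocks".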

   // (Re-)create the array of pointers to the (new) loss records.
   n_lossrecords = get_lr_array_from_lr_table ();
   tl_assert(VG_(OSetGen_Size)(lr_table) == n_lossrecords);

   // Sort the array by loss record sizes.
   VG_(ssort)(lr_array, n_lossrecords, sizeof(LossRecord*),
              cmp_LossRecords);

   // Zero totals.
   MC_(blocks_leaked)     = MC_(bytes_leaked)     = 0;
   MC_(blocks_indirect)   = MC_(bytes_indirect)   = 0;
   MC_(blocks_dubious)    = MC_(bytes_dubious)    = 0;
   MC_(blocks_reachable)  = MC_(bytes_reachable)  = 0;
   MC_(blocks_suppressed) = MC_(bytes_suppressed) = 0;

   // If there is a maximum nr of loss records we can output, then first
   // compute from where the output scan has to start.
   // By default, start from the first loss record.  Compute a higher
   // value if there is a maximum to respect.  We need to print the last
   // records, as the ones with the biggest sizes are the most interesting.
   start_lr_output_scan = 0;
   if (lcp->mode == LC_Full && lcp->max_loss_records_output < n_lossrecords) {
      Int nr_printable_records = 0;
      for (i = n_lossrecords - 1; i >= 0 && start_lr_output_scan == 0; i--) {
         Bool count_as_error, print_record;
         lr = lr_array[i];
         get_printing_rules (lcp, lr, &count_as_error, &print_record);
         // Do not use get_printing_rules results for is_suppressed, as we
         // only want to check if the record would be suppressed.
         is_suppressed =
            MC_(record_leak_error) ( tid, i+1, n_lossrecords, lr,
                                     False /* print_record */,
                                     False /* count_as_error */);
         if (print_record && !is_suppressed) {
            nr_printable_records++;
            if (nr_printable_records == lcp->max_loss_records_output)
               start_lr_output_scan = i;
         }
      }
   }
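   // Worked example of the backward scan above (hypothetical numbers):
   // with n_lossrecords == 1000 and max_loss_records_output == 20, the
   // loop walks from record 999 downwards, counting records that would
   // actually be printed (not suppressed), and stops once 20 have been
   // seen.  start_lr_output_scan then indexes the smallest of the 20
   // biggest printable records, so only those are output below.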

   // Print the loss records (in size order) and collect summary stats.
   for (i = start_lr_output_scan; i < n_lossrecords; i++) {
      Bool count_as_error, print_record;
      lr = lr_array[i];
      get_printing_rules(lcp, lr, &count_as_error, &print_record);
      is_suppressed =
         MC_(record_leak_error) ( tid, i+1, n_lossrecords, lr, print_record,
                                  count_as_error );

      if (is_suppressed) {
         MC_(blocks_suppressed) += lr->num_blocks;
         MC_(bytes_suppressed)  += lr->szB;

      } else if (Unreached == lr->key.state) {
         MC_(blocks_leaked) += lr->num_blocks;
         MC_(bytes_leaked)  += lr->szB;

      } else if (IndirectLeak == lr->key.state) {
         MC_(blocks_indirect) += lr->num_blocks;
         MC_(bytes_indirect)  += lr->szB;

      } else if (Possible == lr->key.state) {
         MC_(blocks_dubious) += lr->num_blocks;
         MC_(bytes_dubious)  += lr->szB;

      } else if (Reachable == lr->key.state) {
         MC_(blocks_reachable) += lr->num_blocks;
         MC_(bytes_reachable)  += lr->szB;

      } else {
         VG_(tool_panic)("unknown loss mode");
      }
   }

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
      HChar d_bytes[20];
      HChar d_blocks[20];
# define DBY(new,old) \
      MC_(snprintf_delta) (d_bytes, 20, (new), (old), lcp->deltamode)
# define DBL(new,old) \
      MC_(snprintf_delta) (d_blocks, 20, (new), (old), lcp->deltamode)
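      // DBY/DBL format the difference between the current and previous
      // counts into d_bytes/d_blocks.  A sketch of the intended effect
      // (exact formatting is up to MC_(snprintf_delta)): in delta mode a
      // summary line reads something like
      //    definitely lost: 60 (+20) bytes in 3 (+1) blocks
      // while outside delta mode the delta strings are empty.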

      VG_(umsg)("LEAK SUMMARY:\n");
      VG_(umsg)("   definitely lost: %'lu%s bytes in %'lu%s blocks\n",
                MC_(bytes_leaked),
                DBY (MC_(bytes_leaked), old_bytes_leaked),
                MC_(blocks_leaked),
                DBL (MC_(blocks_leaked), old_blocks_leaked));
      VG_(umsg)("   indirectly lost: %'lu%s bytes in %'lu%s blocks\n",
                MC_(bytes_indirect),
                DBY (MC_(bytes_indirect), old_bytes_indirect),
                MC_(blocks_indirect),
                DBL (MC_(blocks_indirect), old_blocks_indirect));
      VG_(umsg)("     possibly lost: %'lu%s bytes in %'lu%s blocks\n",
                MC_(bytes_dubious),
                DBY (MC_(bytes_dubious), old_bytes_dubious),
                MC_(blocks_dubious),
                DBL (MC_(blocks_dubious), old_blocks_dubious));
      VG_(umsg)("   still reachable: %'lu%s bytes in %'lu%s blocks\n",
                MC_(bytes_reachable),
                DBY (MC_(bytes_reachable), old_bytes_reachable),
                MC_(blocks_reachable),
                DBL (MC_(blocks_reachable), old_blocks_reachable));
      for (i = 0; i < N_LEAK_CHECK_HEURISTICS; i++)
         if (old_blocks_heuristically_reachable[i] > 0
             || MC_(blocks_heuristically_reachable)[i] > 0) {
            VG_(umsg)("                      of which "
                      "reachable via heuristic:\n");
            break;
         }
      for (i = 0; i < N_LEAK_CHECK_HEURISTICS; i++)
         if (old_blocks_heuristically_reachable[i] > 0
             || MC_(blocks_heuristically_reachable)[i] > 0)
            VG_(umsg)("   %19s: "
                      "%'lu%s bytes in %'lu%s blocks\n",
                      pp_heuristic(i),
                      MC_(bytes_heuristically_reachable)[i],
                      DBY (MC_(bytes_heuristically_reachable)[i],
                           old_bytes_heuristically_reachable[i]),
                      MC_(blocks_heuristically_reachable)[i],
                      DBL (MC_(blocks_heuristically_reachable)[i],
                           old_blocks_heuristically_reachable[i]));
      VG_(umsg)("        suppressed: %'lu%s bytes in %'lu%s blocks\n",
                MC_(bytes_suppressed),
                DBY (MC_(bytes_suppressed), old_bytes_suppressed),
                MC_(blocks_suppressed),
                DBL (MC_(blocks_suppressed), old_blocks_suppressed));
      if (lcp->mode != LC_Full &&
          (MC_(blocks_leaked) + MC_(blocks_indirect) +
           MC_(blocks_dubious) + MC_(blocks_reachable)) > 0) {
         if (lcp->requested_by_monitor_command)
            VG_(umsg)("To see details of leaked memory, "
                      "give 'full' arg to leak_check\n");
         else
            VG_(umsg)("Rerun with --leak-check=full to see details "
                      "of leaked memory\n");
      }
      if (lcp->mode == LC_Full &&
          MC_(blocks_reachable) > 0 && !RiS(Reachable,lcp->show_leak_kinds)) {
         VG_(umsg)("Reachable blocks (those to which a pointer "
                   "was found) are not shown.\n");
         if (lcp->requested_by_monitor_command)
            VG_(umsg)("To see them, add 'reachable any' args to leak_check\n");
         else
            VG_(umsg)("To see them, rerun with: --leak-check=full "
                      "--show-leak-kinds=all\n");
      }
      VG_(umsg)("\n");
   #undef DBL
   #undef DBY
   }
}
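
// Hedged sketch of the output produced above (addresses and counts are
// made up; the parenthesised delta strings only appear in delta mode):
//
//    LEAK SUMMARY:
//       definitely lost: 60 (+60) bytes in 3 (+3) blocks
//       indirectly lost: 120 (+120) bytes in 6 (+6) blocks
//         possibly lost: 0 (+0) bytes in 0 (+0) blocks
//       still reachable: 1,024 (+0) bytes in 2 (+0) blocks
//            suppressed: 0 (+0) bytes in 0 (+0) blocks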

// Print recursively all indirectly leaked blocks collected in clique.
static void print_clique (Int clique, UInt level)
{
   Int ind;
   Int i, n_lossrecords;

   n_lossrecords = VG_(OSetGen_Size)(lr_table);

   for (ind = 0; ind < lc_n_chunks; ind++) {
      LC_Extra* ind_ex = &(lc_extras)[ind];
      if (ind_ex->state == IndirectLeak
          && ind_ex->IorC.clique == (SizeT) clique) {
         MC_Chunk*     ind_ch = lc_chunks[ind];
         LossRecord*   ind_lr;
         LossRecordKey ind_lrkey;
         Int lr_i;
         ind_lrkey.state = ind_ex->state;
         ind_lrkey.allocated_at = MC_(allocated_at)(ind_ch);
         ind_lr = VG_(OSetGen_Lookup)(lr_table, &ind_lrkey);
         for (lr_i = 0; lr_i < n_lossrecords; lr_i++)
            if (ind_lr == lr_array[lr_i])
               break;
         for (i = 0; i < level; i++)
            VG_(umsg)(" ");
         VG_(umsg)("%p[%lu] indirect loss record %d\n",
                   (void *)ind_ch->data, (unsigned long)ind_ch->szB,
                   lr_i+1); // lr_i+1 for user numbering.
         if (lr_i >= n_lossrecords)
            VG_(umsg)
               ("error: no indirect loss record found for %p[%lu]?????\n",
                (void *)ind_ch->data, (unsigned long)ind_ch->szB);
         print_clique(ind, level+1);
      }
   }
}
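
// For example (hypothetical addresses), printing the clique of a leaked
// linked-list head might look like:
//    0x4C2E080[16] indirect loss record 2
//     0x4C2E0D0[16] indirect loss record 2
//      0x4C2E120[16] indirect loss record 2
// with each extra space of indentation marking one more level of
// indirection from the clique leader.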

Bool MC_(print_block_list) ( UInt loss_record_nr)
{
   Int         i, n_lossrecords;
   LossRecord* lr;

   if (lr_table == NULL || lc_chunks == NULL || lc_extras == NULL) {
      VG_(umsg)("Can't print block list: no valid leak search result\n");
      return False;
   }

   if (lc_chunks_n_frees_marker != MC_(get_cmalloc_n_frees)()) {
      VG_(umsg)("Can't print obsolete block list: redo a leak search first\n");
      return False;
   }

   n_lossrecords = VG_(OSetGen_Size)(lr_table);
   if (loss_record_nr >= n_lossrecords)
      return False; // Invalid loss record nr.

   tl_assert (lr_array);
   lr = lr_array[loss_record_nr];

   // (Re-)print the loss record details.
   // (+1 on loss_record_nr as user numbering for loss records starts at 1).
   MC_(pp_LossRecord)(loss_record_nr+1, n_lossrecords, lr);

   // Match the chunks with loss records.
   for (i = 0; i < lc_n_chunks; i++) {
      MC_Chunk*     ch = lc_chunks[i];
      LC_Extra*     ex = &(lc_extras)[i];
      LossRecord*   old_lr;
      LossRecordKey lrkey;
      lrkey.state        = ex->state;
      lrkey.allocated_at = MC_(allocated_at)(ch);

      old_lr = VG_(OSetGen_Lookup)(lr_table, &lrkey);
      if (old_lr) {
         // We found an existing loss record matching this chunk.
         // If this is the loss record we are looking for, output the pointer.
         if (old_lr == lr_array[loss_record_nr]) {
            VG_(umsg)("%p[%lu]\n",
                      (void *)ch->data, (unsigned long) ch->szB);
            if (ex->state != Reachable) {
               // We can print the clique in all states, except Reachable.
               // In Unreached state, lc_chunk[i] is the clique leader.
               // In IndirectLeak, lc_chunk[i] might have been a clique leader
               // which was later collected in another clique.
               // For Possible, lc_chunk[i] might be the top of a clique
               // or an intermediate clique.
               print_clique(i, 1);
            }
         }
      } else {
         // No existing loss record matches this chunk ???
         VG_(umsg)("error: no loss record found for %p[%lu]?????\n",
                   (void *)ch->data, (unsigned long) ch->szB);
      }
   }
   return True;
}
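
// This function backs the gdbserver "block_list" monitor command.  A
// hypothetical session sketch (the record number and output are made up):
//    (gdb) monitor leak_check full reachable any
//    (gdb) monitor block_list 2
// re-prints loss record 2 followed by the address and size of each of
// its blocks, plus their cliques where applicable.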

// If searched = 0, scan memory root set, pushing onto the mark stack the
// blocks encountered.
// Otherwise (searched != 0), scan the memory root set searching for
// pointers pointing inside [searched, searched+szB).
static void scan_memory_root_set(Addr searched, SizeT szB)
{
   Int   i;
   Int   n_seg_starts;
   Addr* seg_starts = VG_(get_segment_starts)( &n_seg_starts );

   tl_assert(seg_starts && n_seg_starts > 0);

   lc_scanned_szB = 0;

   // VG_(am_show_nsegments)( 0, "leakcheck");
   for (i = 0; i < n_seg_starts; i++) {
      SizeT seg_size;
      NSegment const* seg = VG_(am_find_nsegment)( seg_starts[i] );
      tl_assert(seg);

      if (seg->kind != SkFileC && seg->kind != SkAnonC) continue;
      if (!(seg->hasR && seg->hasW))                    continue;
      if (seg->isCH)                                    continue;

      // Don't poke around in device segments as this may cause
      // hangs.  Exclude /dev/zero just in case someone allocated
      // memory by explicitly mapping /dev/zero.
      if (seg->kind == SkFileC
          && (VKI_S_ISCHR(seg->mode) || VKI_S_ISBLK(seg->mode))) {
         HChar* dev_name = VG_(am_get_filename)( seg );
         if (dev_name && 0 == VG_(strcmp)(dev_name, "/dev/zero")) {
            // Don't skip /dev/zero.
         } else {
            // Skip this device mapping.
            continue;
         }
      }

      if (0)
         VG_(printf)("ACCEPT %2d  %#lx %#lx\n", i, seg->start, seg->end);

      // Scan the segment.  We use -1 for the clique number, because this
      // is a root-set.
      seg_size = seg->end - seg->start + 1;
      if (VG_(clo_verbosity) > 2) {
         VG_(message)(Vg_DebugMsg,
                      "  Scanning root segment: %#lx..%#lx (%lu)\n",
                      seg->start, seg->end, seg_size);
      }
      lc_scan_memory(seg->start, seg_size, /*is_prior_definite*/True,
                     /*clique*/-1, /*cur_clique*/-1,
                     searched, szB);
   }
   VG_(free)(seg_starts);
}
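
// In other words, the root set scanned above is every client mapping that
// is readable and writable and not part of the Memcheck-managed heap:
// typically the thread stacks, the .data/.bss of the executable and of
// shared objects, and directly mmap'd regions.  (This summary is an
// interpretation of the filters above, not an exhaustive statement.)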

/*------------------------------------------------------------*/
/*--- Top-level entry point.                               ---*/
/*------------------------------------------------------------*/

void MC_(detect_memory_leaks) ( ThreadId tid, LeakCheckParams* lcp)
{
   Int i, j;

   tl_assert(lcp->mode != LC_Off);

   // Verify some assertions which are used in lc_scan_memory.
   tl_assert((VKI_PAGE_SIZE % sizeof(Addr)) == 0);
   tl_assert((SM_SIZE % sizeof(Addr)) == 0);
   // The above two assertions are critical, while the below assertion
   // ensures that the optimisation in the loop is done in the
   // correct order: the loop checks for (big) SM chunk skipping
   // before checking for (smaller) page skipping.
   tl_assert((SM_SIZE % VKI_PAGE_SIZE) == 0);

   MC_(detect_memory_leaks_last_delta_mode) = lcp->deltamode;
   detect_memory_leaks_last_heuristics = lcp->heuristics;

   // Get the chunks, stop if there were none.
   if (lc_chunks) {
      VG_(free)(lc_chunks);
      lc_chunks = NULL;
   }
   lc_chunks = find_active_chunks(&lc_n_chunks);
   lc_chunks_n_frees_marker = MC_(get_cmalloc_n_frees)();
   if (lc_n_chunks == 0) {
      tl_assert(lc_chunks == NULL);
      if (lr_table != NULL) {
         // Forget the previously recorded LossRecords, as the next leak
         // search can in any case only create new leaks.
         // Maybe it would be better to call print_results instead?
         // (At least when leak decreases are requested.)
         // That would then output all LossRecords with a size decreasing to 0.
         VG_(OSetGen_Destroy) (lr_table);
         lr_table = NULL;
      }
      if (VG_(clo_verbosity) >= 1 && !VG_(clo_xml)) {
         VG_(umsg)("All heap blocks were freed -- no leaks are possible\n");
         VG_(umsg)("\n");
      }
      return;
   }

   // Sort the array so blocks are in ascending order in memory.
   VG_(ssort)(lc_chunks, lc_n_chunks, sizeof(VgHashNode*), compare_MC_Chunks);

   // Sanity check -- make sure they're in order.
   for (i = 0; i < lc_n_chunks-1; i++) {
      tl_assert( lc_chunks[i]->data <= lc_chunks[i+1]->data);
   }

   // Sanity check -- make sure they don't overlap.  The one exception is that
   // we allow a MALLOCLIKE block to sit entirely within a malloc() block.
   // This is for bug 100628.  If this occurs, we ignore the malloc() block
   // for leak-checking purposes.  This is a hack and probably should be done
   // better, but at least it's consistent with mempools (which are treated
   // like this in find_active_chunks).  Mempools have a separate VgHashTable
   // for mempool chunks, but if custom-allocated blocks are put in a separate
   // table from normal heap blocks it makes free-mismatch checking more
   // difficult.
   //
   // If this check fails, it probably means that the application
   // has done something stupid with VALGRIND_MALLOCLIKE_BLOCK client
   // requests, e.g. has made overlapping requests (which are
   // nonsensical), or used VALGRIND_MALLOCLIKE_BLOCK for stack locations;
   // again nonsensical.
   //
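   // A hypothetical client-code sketch of the allowed exception (buffer
   // and sizes are made up): a custom allocator carving objects out of
   // malloc'd memory and describing them with a client request:
   //
   //    char* pool = malloc(4096);
   //    char* obj  = pool + 64;
   //    VALGRIND_MALLOCLIKE_BLOCK(obj, 128, /*rzB*/0, /*is_zeroed*/0);
   //
   // Here the obj block sits entirely inside the pool block; the code
   // below then drops the enclosing malloc() block from the leak check.
   //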
   for (i = 0; i < lc_n_chunks-1; i++) {
      MC_Chunk* ch1 = lc_chunks[i];
      MC_Chunk* ch2 = lc_chunks[i+1];

      Addr start1    = ch1->data;
      Addr start2    = ch2->data;
      Addr end1      = ch1->data + ch1->szB - 1;
      Addr end2      = ch2->data + ch2->szB - 1;
      Bool isCustom1 = ch1->allockind == MC_AllocCustom;
      Bool isCustom2 = ch2->allockind == MC_AllocCustom;

      if (end1 < start2) {
         // Normal case - no overlap.

         // We used to allow exact duplicates, I'm not sure why.  --njn
         //} else if (start1 == start2 && end1 == end2) {
         // Degenerate case: exact duplicates.

      } else if (start1 >= start2 && end1 <= end2 && isCustom1 && !isCustom2) {
         // Block i is MALLOCLIKE and entirely within block i+1.
         // Remove block i+1.
         for (j = i+1; j < lc_n_chunks-1; j++) {
            lc_chunks[j] = lc_chunks[j+1];
         }
         lc_n_chunks--;

      } else if (start2 >= start1 && end2 <= end1 && isCustom2 && !isCustom1) {
         // Block i+1 is MALLOCLIKE and entirely within block i.
         // Remove block i.
         for (j = i; j < lc_n_chunks-1; j++) {
            lc_chunks[j] = lc_chunks[j+1];
         }
         lc_n_chunks--;

      } else {
         VG_(umsg)("Block 0x%lx..0x%lx overlaps with block 0x%lx..0x%lx\n",
                   start1, end1, start2, end2);
         VG_(umsg)("Blocks allocation contexts:\n");
         VG_(pp_ExeContext)( MC_(allocated_at)(ch1));
         VG_(umsg)("\n");
         VG_(pp_ExeContext)( MC_(allocated_at)(ch2));
         VG_(umsg)("This is usually caused by using VALGRIND_MALLOCLIKE_BLOCK"
                   " in an inappropriate way.\n");
         tl_assert (0);
      }
   }

   // Initialise lc_extras.
   if (lc_extras) {
      VG_(free)(lc_extras);
      lc_extras = NULL;
   }
   lc_extras = VG_(malloc)( "mc.dml.2", lc_n_chunks * sizeof(LC_Extra) );
   for (i = 0; i < lc_n_chunks; i++) {
      lc_extras[i].state = Unreached;
      lc_extras[i].pending = False;
      lc_extras[i].heuristic = LchNone;
      lc_extras[i].IorC.indirect_szB = 0;
   }

   // Initialise lc_markstack.
   lc_markstack = VG_(malloc)( "mc.dml.2", lc_n_chunks * sizeof(Int) );
   for (i = 0; i < lc_n_chunks; i++) {
      lc_markstack[i] = -1;
   }
   lc_markstack_top = -1;

   // Verbosity.
   if (VG_(clo_verbosity) > 1 && !VG_(clo_xml)) {
      VG_(umsg)( "Searching for pointers to %'d not-freed blocks\n",
                 lc_n_chunks );
   }

   // Scan the memory root-set, pushing onto the mark stack any blocks
   // pointed to.
   scan_memory_root_set(/*searched*/0, 0);

   // Scan GP registers for chunk pointers.
   VG_(apply_to_GP_regs)(lc_push_if_a_chunk_ptr_register);

   // Process the pushed blocks.  After this, every block that is reachable
   // from the root-set has been traced.
   lc_process_markstack(/*clique*/-1);

   if (VG_(clo_verbosity) > 1 && !VG_(clo_xml)) {
      VG_(umsg)("Checked %'lu bytes\n", lc_scanned_szB);
      VG_(umsg)( "\n" );
   }

   // Trace all the leaked blocks to determine which are directly leaked and
   // which are indirectly leaked.  For each Unreached block, push it onto
   // the mark stack, and find all the as-yet-Unreached blocks reachable
   // from it.  These form a clique and are marked IndirectLeak, and their
   // size is added to the clique leader's indirect size.  If one of the
   // found blocks was itself a clique leader (from a previous clique), then
   // the cliques are merged.
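   //
   // Illustrative example (hypothetical heap): if the only pointer to
   // block A has been lost, and A holds the only pointers to B and C,
   //
   //    (no root) -x-> A --> B
   //                   A --> C
   //
   // then A is reported as definitely lost, B and C as indirectly lost,
   // and A's loss record carries indirect_szB == szB(B) + szB(C).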
   for (i = 0; i < lc_n_chunks; i++) {
      MC_Chunk* ch = lc_chunks[i];
      LC_Extra* ex = &(lc_extras[i]);

      if (VG_DEBUG_CLIQUE)
         VG_(printf)("cliques: %d at %#lx -> Loss state %d\n",
                     i, ch->data, ex->state);

      tl_assert(lc_markstack_top == -1);

      if (ex->state == Unreached) {
         if (VG_DEBUG_CLIQUE)
            VG_(printf)("%d: gathering clique %#lx\n", i, ch->data);

         // Push this Unreached block onto the stack and process it.
         lc_push(i, ch);
         lc_process_markstack(/*clique*/i);

         tl_assert(lc_markstack_top == -1);
         tl_assert(ex->state == Unreached);
      }
   }

   print_results( tid, lcp);

   VG_(free) ( lc_markstack );
   lc_markstack = NULL;
   // lc_chunks, lc_extras, lr_array and lr_table are kept (needed if user
   // calls MC_(print_block_list)).  lr_table is also used for delta leak
   // reporting between this leak search and the next leak search.
}

static Addr  searched_wpa;
static SizeT searched_szB;
static void
search_address_in_GP_reg(ThreadId tid, const HChar* regname, Addr addr_in_reg)
{
   if (addr_in_reg >= searched_wpa
       && addr_in_reg < searched_wpa + searched_szB) {
      if (addr_in_reg == searched_wpa)
         VG_(umsg)
            ("tid %d register %s pointing at %#lx\n",
             tid, regname, searched_wpa);
      else
         VG_(umsg)
            ("tid %d register %s interior pointing %lu bytes inside %#lx\n",
             tid, regname, (long unsigned) addr_in_reg - searched_wpa,
             searched_wpa);
   }
}

void MC_(who_points_at) ( Addr address, SizeT szB)
{
   MC_Chunk** chunks;
   Int        n_chunks;
   Int        i;

   if (szB == 1)
      VG_(umsg) ("Searching for pointers to %#lx\n", address);
   else
      VG_(umsg) ("Searching for pointers pointing in %lu bytes from %#lx\n",
                 szB, address);

   chunks = find_active_chunks(&n_chunks);

   // Scan the memory root-set, searching for pointers pointing in
   // address[szB].
   scan_memory_root_set(address, szB);

   // Scan active malloc-ed chunks.
   for (i = 0; i < n_chunks; i++) {
      lc_scan_memory(chunks[i]->data, chunks[i]->szB,
                     /*is_prior_definite*/True,
                     /*clique*/-1, /*cur_clique*/-1,
                     address, szB);
   }
   VG_(free) ( chunks );

   // Scan GP registers for pointers to the address range.
   searched_wpa = address;
   searched_szB = szB;
   VG_(apply_to_GP_regs)(search_address_in_GP_reg);
}
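
// MC_(who_points_at) backs the gdbserver "who_points_at" monitor command.
// A hypothetical session sketch (addresses made up):
//    (gdb) monitor who_points_at 0x4C2E080 16
//    Searching for pointers pointing in 16 bytes from 0x4C2E080
// followed by one line per root-set word or heap-block word found by
// lc_scan_memory to point into the range, and per matching register,
// e.g. the register case printed by search_address_in_GP_reg above:
//    tid 1 register RSI pointing at 0x4C2E080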

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/