njn | 43c799e | 2003-04-08 00:08:52 +0000 | [diff] [blame] | 1 | |
| 2 | /*--------------------------------------------------------------------*/ |
| 3 | /*--- The leak checker, shared between Memcheck and Addrcheck. ---*/ |
| 4 | /*--- mac_leakcheck.c ---*/ |
| 5 | /*--------------------------------------------------------------------*/ |
| 6 | |
| 7 | /* |
nethercote | 137bc55 | 2003-11-14 17:47:54 +0000 | [diff] [blame] | 8 | This file is part of MemCheck, a heavyweight Valgrind tool for |
| 9 | detecting memory errors, and AddrCheck, a lightweight Valgrind tool |
njn | 43c799e | 2003-04-08 00:08:52 +0000 | [diff] [blame] | 10 | for detecting memory errors. |
| 11 | |
nethercote | bb1c991 | 2004-01-04 16:43:23 +0000 | [diff] [blame] | 12 | Copyright (C) 2000-2004 Julian Seward |
njn | 43c799e | 2003-04-08 00:08:52 +0000 | [diff] [blame] | 13 | jseward@acm.org |
| 14 | |
| 15 | This program is free software; you can redistribute it and/or |
| 16 | modify it under the terms of the GNU General Public License as |
| 17 | published by the Free Software Foundation; either version 2 of the |
| 18 | License, or (at your option) any later version. |
| 19 | |
| 20 | This program is distributed in the hope that it will be useful, but |
| 21 | WITHOUT ANY WARRANTY; without even the implied warranty of |
| 22 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 23 | General Public License for more details. |
| 24 | |
| 25 | You should have received a copy of the GNU General Public License |
| 26 | along with this program; if not, write to the Free Software |
| 27 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA |
| 28 | 02111-1307, USA. |
| 29 | |
| 30 | The GNU General Public License is contained in the file COPYING. |
| 31 | */ |
| 32 | |
| 33 | #include "mac_shared.h" |
| 34 | |
| 35 | /* Define to debug the memory-leak-detector. */ |
| 36 | /* #define VG_DEBUG_LEAKCHECK */ |
| 37 | |
| 38 | /*------------------------------------------------------------*/ |
| 39 | /*--- Low-level address-space scanning, for the leak ---*/ |
| 40 | /*--- detector. ---*/ |
| 41 | /*------------------------------------------------------------*/ |
| 42 | |
/* Non-local exit target shared between the probe code in
   vg_scan_all_valid_memory() and the fault handler below.  A fault
   taken while prodding a page aborts only that probe, not the whole
   scan. */
static
jmp_buf memscan_jmpbuf;


/* Handler temporarily installed for SIGSEGV/SIGBUS while scanning.
   Jumps back to the __builtin_setjmp point so the faulting page is
   simply skipped.  sigNo is ignored. */
static
void vg_scan_all_valid_memory_sighandler ( Int sigNo )
{
   __builtin_longjmp(memscan_jmpbuf, 1);
}
| 52 | |
| 53 | |
| 54 | /* Safely (avoiding SIGSEGV / SIGBUS) scan the entire valid address |
| 55 | space and pass the addresses and values of all addressible, |
| 56 | defined, aligned words to notify_word. This is the basis for the |
| 57 | leak detector. Returns the number of calls made to notify_word. |
| 58 | |
| 59 | Addresses are validated 3 ways. First we enquire whether (addr >> |
| 60 | 16) denotes a 64k chunk in use, by asking is_valid_64k_chunk(). If |
| 61 | so, we decide for ourselves whether each x86-level (4 K) page in |
| 62 | the chunk is safe to inspect. If yes, we enquire with |
| 63 | is_valid_address() whether or not each of the 1024 word-locations |
| 64 | on the page is valid. Only if so are that address and its contents |
| 65 | passed to notify_word. |
| 66 | |
| 67 | This is all to avoid duplication of this machinery between the |
| 68 | memcheck and addrcheck skins. |
| 69 | */ |
| 70 | static |
| 71 | UInt vg_scan_all_valid_memory ( Bool is_valid_64k_chunk ( UInt ), |
| 72 | Bool is_valid_address ( Addr ), |
| 73 | void (*notify_word)( Addr, UInt ) ) |
| 74 | { |
| 75 | /* All volatile, because some gccs seem paranoid about longjmp(). */ |
| 76 | volatile Bool anyValid; |
| 77 | volatile Addr pageBase, addr; |
| 78 | volatile UInt res, numPages, page, primaryMapNo; |
| 79 | volatile UInt page_first_word, nWordsNotified; |
| 80 | |
| 81 | vki_ksigaction sigbus_saved; |
| 82 | vki_ksigaction sigbus_new; |
| 83 | vki_ksigaction sigsegv_saved; |
| 84 | vki_ksigaction sigsegv_new; |
| 85 | vki_ksigset_t blockmask_saved; |
| 86 | vki_ksigset_t unblockmask_new; |
| 87 | |
| 88 | /* Temporarily install a new sigsegv and sigbus handler, and make |
| 89 | sure SIGBUS, SIGSEGV and SIGTERM are unblocked. (Perhaps the |
| 90 | first two can never be blocked anyway?) */ |
| 91 | |
| 92 | sigbus_new.ksa_handler = vg_scan_all_valid_memory_sighandler; |
| 93 | sigbus_new.ksa_flags = VKI_SA_ONSTACK | VKI_SA_RESTART; |
| 94 | sigbus_new.ksa_restorer = NULL; |
| 95 | res = VG_(ksigemptyset)( &sigbus_new.ksa_mask ); |
| 96 | sk_assert(res == 0); |
| 97 | |
| 98 | sigsegv_new.ksa_handler = vg_scan_all_valid_memory_sighandler; |
| 99 | sigsegv_new.ksa_flags = VKI_SA_ONSTACK | VKI_SA_RESTART; |
| 100 | sigsegv_new.ksa_restorer = NULL; |
| 101 | res = VG_(ksigemptyset)( &sigsegv_new.ksa_mask ); |
| 102 | sk_assert(res == 0+0); |
| 103 | |
| 104 | res = VG_(ksigemptyset)( &unblockmask_new ); |
| 105 | res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGBUS ); |
| 106 | res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGSEGV ); |
| 107 | res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGTERM ); |
| 108 | sk_assert(res == 0+0+0); |
| 109 | |
| 110 | res = VG_(ksigaction)( VKI_SIGBUS, &sigbus_new, &sigbus_saved ); |
| 111 | sk_assert(res == 0+0+0+0); |
| 112 | |
| 113 | res = VG_(ksigaction)( VKI_SIGSEGV, &sigsegv_new, &sigsegv_saved ); |
| 114 | sk_assert(res == 0+0+0+0+0); |
| 115 | |
| 116 | res = VG_(ksigprocmask)( VKI_SIG_UNBLOCK, &unblockmask_new, &blockmask_saved ); |
| 117 | sk_assert(res == 0+0+0+0+0+0); |
| 118 | |
| 119 | /* The signal handlers are installed. Actually do the memory scan. */ |
| 120 | numPages = 1 << (32-VKI_BYTES_PER_PAGE_BITS); |
| 121 | sk_assert(numPages == 1048576); |
| 122 | sk_assert(4096 == (1 << VKI_BYTES_PER_PAGE_BITS)); |
| 123 | |
| 124 | nWordsNotified = 0; |
| 125 | |
| 126 | for (page = 0; page < numPages; page++) { |
| 127 | |
| 128 | /* Base address of this 4k page. */ |
| 129 | pageBase = page << VKI_BYTES_PER_PAGE_BITS; |
| 130 | |
| 131 | /* Skip if this page is in an unused 64k chunk. */ |
| 132 | primaryMapNo = pageBase >> 16; |
| 133 | if (!is_valid_64k_chunk(primaryMapNo)) |
| 134 | continue; |
| 135 | |
| 136 | /* Next, establish whether or not we want to consider any |
| 137 | locations on this page. We need to do so before actually |
| 138 | prodding it, because prodding it when in fact it is not |
| 139 | needed can cause a page fault which under some rare |
| 140 | circumstances can cause the kernel to extend the stack |
| 141 | segment all the way down to here, which is seriously bad. |
| 142 | Hence: */ |
| 143 | anyValid = False; |
| 144 | for (addr = pageBase; addr < pageBase+VKI_BYTES_PER_PAGE; addr += 4) { |
| 145 | if (is_valid_address(addr)) { |
| 146 | anyValid = True; |
| 147 | break; |
| 148 | } |
| 149 | } |
| 150 | |
| 151 | if (!anyValid) |
| 152 | continue; /* nothing interesting here .. move to the next page */ |
| 153 | |
| 154 | /* Ok, we have to prod cautiously at the page and see if it |
| 155 | explodes or not. */ |
| 156 | if (__builtin_setjmp(memscan_jmpbuf) == 0) { |
| 157 | /* try this ... */ |
| 158 | page_first_word = * (volatile UInt*)pageBase; |
| 159 | /* we get here if we didn't get a fault */ |
| 160 | /* Scan the page */ |
| 161 | for (addr = pageBase; addr < pageBase+VKI_BYTES_PER_PAGE; addr += 4) { |
| 162 | if (is_valid_address(addr)) { |
| 163 | nWordsNotified++; |
| 164 | notify_word ( addr, *(UInt*)addr ); |
| 165 | } |
| 166 | } |
| 167 | } else { |
| 168 | /* We get here if reading the first word of the page caused a |
| 169 | fault, which in turn caused the signal handler to longjmp. |
| 170 | Ignore this page. */ |
| 171 | if (0) |
| 172 | VG_(printf)( |
| 173 | "vg_scan_all_valid_memory_sighandler: ignoring page at %p\n", |
| 174 | (void*)pageBase |
| 175 | ); |
| 176 | } |
| 177 | } |
| 178 | |
| 179 | /* Restore signal state to whatever it was before. */ |
| 180 | res = VG_(ksigaction)( VKI_SIGBUS, &sigbus_saved, NULL ); |
| 181 | sk_assert(res == 0 +0); |
| 182 | |
| 183 | res = VG_(ksigaction)( VKI_SIGSEGV, &sigsegv_saved, NULL ); |
| 184 | sk_assert(res == 0 +0 +0); |
| 185 | |
| 186 | res = VG_(ksigprocmask)( VKI_SIG_SETMASK, &blockmask_saved, NULL ); |
| 187 | sk_assert(res == 0 +0 +0 +0); |
| 188 | |
| 189 | return nWordsNotified; |
| 190 | } |
| 191 | |
| 192 | /*------------------------------------------------------------*/ |
| 193 | /*--- Detecting leaked (unreachable) malloc'd blocks. ---*/ |
| 194 | /*------------------------------------------------------------*/ |
| 195 | |
/* A block is either
   -- Proper-ly reached; a pointer to its start has been found
   -- Interior-ly reached; only an interior pointer to it has been found
   -- Unreached; so far, no pointers to any part of it have been found.
*/
typedef
   enum { Unreached, Interior, Proper }
   Reachedness;

/* A block record, used for generating err msgs.  One record commons up
   all blocks with the same Reachedness and (sufficiently) equal
   allocation contexts. */
typedef
   struct _LossRecord {
      struct _LossRecord* next;   /* next record in the errlist chain */
      /* Where these lost blocks were allocated. */
      ExeContext*  allocated_at;
      /* Their reachability. */
      Reachedness  loss_mode;
      /* Number of blocks and total # bytes involved. */
      UInt         total_bytes;
      UInt         num_blocks;
   }
   LossRecord;
| 218 | |
| 219 | |
| 220 | /* Find the i such that ptr points at or inside the block described by |
| 221 | shadows[i]. Return -1 if none found. This assumes that shadows[] |
| 222 | has been sorted on the ->data field. */ |
| 223 | |
#ifdef VG_DEBUG_LEAKCHECK
/* Reference implementation: a straight linear scan over the shadow
   array, used only to cross-check the binary search in
   find_shadow_for().  Returns the index of the chunk containing ptr,
   or -1 if no chunk does. */
static
Int find_shadow_for_OLD ( Addr ptr,
                          MAC_Chunk** shadows,
                          Int n_shadows )
{
   Int  k;
   Addr lo, hi;
   PROF_EVENT(70);
   for (k = 0; k < n_shadows; k++) {
      PROF_EVENT(71);
      lo = shadows[k]->data;
      hi = ((Addr)shadows[k]->data) + shadows[k]->size - 1;
      if (lo <= ptr && ptr <= hi)
         return k;
   }
   return -1;
}
#endif
| 245 | |
| 246 | |
| 247 | static |
njn | 3e88418 | 2003-04-15 13:03:23 +0000 | [diff] [blame] | 248 | Int find_shadow_for ( Addr ptr, |
| 249 | MAC_Chunk** shadows, |
| 250 | Int n_shadows ) |
njn | 43c799e | 2003-04-08 00:08:52 +0000 | [diff] [blame] | 251 | { |
| 252 | Addr a_mid_lo, a_mid_hi; |
| 253 | Int lo, mid, hi, retVal; |
| 254 | /* VG_(printf)("find shadow for %p = ", ptr); */ |
| 255 | retVal = -1; |
| 256 | lo = 0; |
| 257 | hi = n_shadows-1; |
| 258 | while (True) { |
njn | 3e88418 | 2003-04-15 13:03:23 +0000 | [diff] [blame] | 259 | /* invariant: current unsearched space is from lo to hi, inclusive. */ |
njn | 43c799e | 2003-04-08 00:08:52 +0000 | [diff] [blame] | 260 | if (lo > hi) break; /* not found */ |
| 261 | |
| 262 | mid = (lo + hi) / 2; |
njn | 3e88418 | 2003-04-15 13:03:23 +0000 | [diff] [blame] | 263 | a_mid_lo = shadows[mid]->data; |
| 264 | a_mid_hi = shadows[mid]->data + shadows[mid]->size - 1; |
njn | 43c799e | 2003-04-08 00:08:52 +0000 | [diff] [blame] | 265 | |
| 266 | if (ptr < a_mid_lo) { |
| 267 | hi = mid-1; |
| 268 | continue; |
| 269 | } |
| 270 | if (ptr > a_mid_hi) { |
| 271 | lo = mid+1; |
| 272 | continue; |
| 273 | } |
| 274 | sk_assert(ptr >= a_mid_lo && ptr <= a_mid_hi); |
| 275 | retVal = mid; |
| 276 | break; |
| 277 | } |
| 278 | |
| 279 | # ifdef VG_DEBUG_LEAKCHECK |
| 280 | sk_assert(retVal == find_shadow_for_OLD ( ptr, shadows, n_shadows )); |
| 281 | # endif |
| 282 | /* VG_(printf)("%d\n", retVal); */ |
| 283 | return retVal; |
| 284 | } |
| 285 | |
/* Globals, for the following callback used by VG_(detect_memory_leaks). */
static MAC_Chunk**  lc_shadows;           /* sorted array of not-freed chunks */
static Int          lc_n_shadows;         /* number of entries in lc_shadows */
static Reachedness* lc_reachedness;       /* per-chunk result, parallel to lc_shadows */
static Addr         lc_min_mallocd_addr;  /* lowest address covered by any chunk */
static Addr         lc_max_mallocd_addr;  /* highest address covered by any chunk */
njn | 43c799e | 2003-04-08 00:08:52 +0000 | [diff] [blame] | 292 | |
| 293 | static |
| 294 | void vg_detect_memory_leaks_notify_addr ( Addr a, UInt word_at_a ) |
| 295 | { |
| 296 | Int sh_no; |
| 297 | Addr ptr; |
| 298 | |
| 299 | /* Rule out some known causes of bogus pointers. Mostly these do |
| 300 | not cause much trouble because only a few false pointers can |
| 301 | ever lurk in these places. This mainly stops it reporting that |
| 302 | blocks are still reachable in stupid test programs like this |
| 303 | |
| 304 | int main (void) { char* a = malloc(100); return 0; } |
| 305 | |
| 306 | which people seem inordinately fond of writing, for some reason. |
| 307 | |
| 308 | Note that this is a complete kludge. It would be better to |
| 309 | ignore any addresses corresponding to valgrind.so's .bss and |
| 310 | .data segments, but I cannot think of a reliable way to identify |
| 311 | where the .bss segment has been put. If you can, drop me a |
| 312 | line. |
| 313 | */ |
fitzhardinge | 98abfc7 | 2003-12-16 02:05:15 +0000 | [diff] [blame] | 314 | if (!VG_(is_client_addr)(a)) return; |
njn | 43c799e | 2003-04-08 00:08:52 +0000 | [diff] [blame] | 315 | |
| 316 | /* OK, let's get on and do something Useful for a change. */ |
| 317 | |
| 318 | ptr = (Addr)word_at_a; |
njn | 3e88418 | 2003-04-15 13:03:23 +0000 | [diff] [blame] | 319 | if (ptr >= lc_min_mallocd_addr && ptr <= lc_max_mallocd_addr) { |
njn | 43c799e | 2003-04-08 00:08:52 +0000 | [diff] [blame] | 320 | /* Might be legitimate; we'll have to investigate further. */ |
njn | 3e88418 | 2003-04-15 13:03:23 +0000 | [diff] [blame] | 321 | sh_no = find_shadow_for ( ptr, lc_shadows, lc_n_shadows ); |
njn | 43c799e | 2003-04-08 00:08:52 +0000 | [diff] [blame] | 322 | if (sh_no != -1) { |
| 323 | /* Found a block at/into which ptr points. */ |
njn | 3e88418 | 2003-04-15 13:03:23 +0000 | [diff] [blame] | 324 | sk_assert(sh_no >= 0 && sh_no < lc_n_shadows); |
| 325 | sk_assert(ptr < lc_shadows[sh_no]->data + lc_shadows[sh_no]->size); |
njn | 43c799e | 2003-04-08 00:08:52 +0000 | [diff] [blame] | 326 | /* Decide whether Proper-ly or Interior-ly reached. */ |
njn | 3e88418 | 2003-04-15 13:03:23 +0000 | [diff] [blame] | 327 | if (ptr == lc_shadows[sh_no]->data) { |
njn | 43c799e | 2003-04-08 00:08:52 +0000 | [diff] [blame] | 328 | if (0) VG_(printf)("pointer at %p to %p\n", a, word_at_a ); |
njn | 3e88418 | 2003-04-15 13:03:23 +0000 | [diff] [blame] | 329 | lc_reachedness[sh_no] = Proper; |
njn | 43c799e | 2003-04-08 00:08:52 +0000 | [diff] [blame] | 330 | } else { |
njn | 3e88418 | 2003-04-15 13:03:23 +0000 | [diff] [blame] | 331 | if (lc_reachedness[sh_no] == Unreached) |
| 332 | lc_reachedness[sh_no] = Interior; |
njn | 43c799e | 2003-04-08 00:08:52 +0000 | [diff] [blame] | 333 | } |
| 334 | } |
| 335 | } |
| 336 | } |
| 337 | |
| 338 | /* Used for printing leak errors, avoids exposing the LossRecord type (which |
| 339 | comes in as void*, requiring a cast. */ |
| 340 | void MAC_(pp_LeakError)(void* vl, UInt n_this_record, UInt n_total_records) |
| 341 | { |
| 342 | LossRecord* l = (LossRecord*)vl; |
| 343 | |
| 344 | VG_(message)(Vg_UserMsg, ""); |
| 345 | VG_(message)(Vg_UserMsg, |
| 346 | "%d bytes in %d blocks are %s in loss record %d of %d", |
| 347 | l->total_bytes, l->num_blocks, |
| 348 | l->loss_mode==Unreached ? "definitely lost" |
| 349 | : (l->loss_mode==Interior ? "possibly lost" |
| 350 | : "still reachable"), |
| 351 | n_this_record, n_total_records |
| 352 | ); |
| 353 | VG_(pp_ExeContext)(l->allocated_at); |
| 354 | } |
| 355 | |
/* Per-category byte totals from the most recent leak check.  Exported
   (non-static) so the skin can report them, e.g. at exit. */
Int MAC_(bytes_leaked)     = 0;   /* Unreached blocks */
Int MAC_(bytes_dubious)    = 0;   /* Interior-reached blocks */
Int MAC_(bytes_reachable)  = 0;   /* Proper-reached blocks */
Int MAC_(bytes_suppressed) = 0;   /* blocks in suppressed loss records */
njn | 47363ab | 2003-04-21 13:24:40 +0000 | [diff] [blame] | 360 | |
njn | 06072ec | 2003-09-30 15:35:13 +0000 | [diff] [blame] | 361 | static Int lc_compar(void* n1, void* n2) |
| 362 | { |
| 363 | MAC_Chunk* mc1 = *(MAC_Chunk**)n1; |
| 364 | MAC_Chunk* mc2 = *(MAC_Chunk**)n2; |
| 365 | return (mc1->data < mc2->data ? -1 : 1); |
| 366 | } |
| 367 | |
/* Top level entry point to leak detector.  Call here, passing in
   suitable address-validating functions (see comment at top of
   vg_scan_all_valid_memory above).  All this is to avoid duplication
   of the leak-detection code for the Memcheck and Addrcheck skins.
   Also pass in a skin-specific function to extract the .where field
   for allocated blocks, an indication of the resolution wanted for
   distinguishing different allocation points, and whether or not
   reachable blocks should be shown.

   Overall flow: snapshot and sort the not-freed blocks, scan all
   valid memory marking each block's reachability, common blocks up
   into LossRecords, then report each record (via the error machinery,
   so suppressions apply) and accumulate the MAC_(bytes_*) summary
   counters.
*/
void MAC_(do_detect_memory_leaks) (
   Bool is_valid_64k_chunk ( UInt ),
   Bool is_valid_address ( Addr )
)
{
   Int    i;
   Int    blocks_leaked;      /* block counts per category; the matching */
   Int    blocks_dubious;     /* byte counts go in the exported          */
   Int    blocks_reachable;   /* MAC_(bytes_*) globals                   */
   Int    blocks_suppressed;
   Int    n_lossrecords;      /* number of distinct LossRecords built */
   UInt   bytes_notified;     /* total bytes examined by the scan */
   Bool   is_suppressed;

   LossRecord* errlist;       /* head of the commoned-up record list */
   LossRecord* p;

   /* VG_(HT_to_array) allocates storage for shadows */
   lc_shadows = (MAC_Chunk**)VG_(HT_to_array)( MAC_(malloc_list),
                                               &lc_n_shadows );

   /* Sort the array. */
   VG_(ssort)((void*)lc_shadows, lc_n_shadows, sizeof(VgHashNode*), lc_compar);

   /* Sanity check; assert that the blocks are now in order */
   for (i = 0; i < lc_n_shadows-1; i++) {
      sk_assert( lc_shadows[i]->data <= lc_shadows[i+1]->data);
   }

   /* Sanity check -- make sure they don't overlap */
   for (i = 0; i < lc_n_shadows-1; i++) {
      sk_assert( lc_shadows[i]->data + lc_shadows[i]->size
                 < lc_shadows[i+1]->data );
   }

   /* Nothing allocated => nothing to do. */
   if (lc_n_shadows == 0) {
      sk_assert(lc_shadows == NULL);
      if (VG_(clo_verbosity) >= 1) {
         VG_(message)(Vg_UserMsg,
                      "No malloc'd blocks -- no leaks are possible.");
      }
      return;
   }

   if (VG_(clo_verbosity) > 0)
      VG_(message)(Vg_UserMsg,
                   "searching for pointers to %d not-freed blocks.",
                   lc_n_shadows );

   /* Cache the overall address range of the blocks, so the scan
      callback can cheaply reject most candidate pointers. */
   lc_min_mallocd_addr = lc_shadows[0]->data;
   lc_max_mallocd_addr = lc_shadows[lc_n_shadows-1]->data
                         + lc_shadows[lc_n_shadows-1]->size - 1;

   /* One Reachedness per block, all initially Unreached; upgraded by
      vg_detect_memory_leaks_notify_addr during the scan. */
   lc_reachedness = VG_(malloc)( lc_n_shadows * sizeof(Reachedness) );
   for (i = 0; i < lc_n_shadows; i++)
      lc_reachedness[i] = Unreached;

   /* Do the scan of memory. */
   bytes_notified
       = VKI_BYTES_PER_WORD
         * vg_scan_all_valid_memory (
              is_valid_64k_chunk,
              is_valid_address,
              &vg_detect_memory_leaks_notify_addr
           );

   if (VG_(clo_verbosity) > 0)
      VG_(message)(Vg_UserMsg, "checked %d bytes.", bytes_notified);

   /* Common up the lost blocks so we can print sensible error messages.
      For each block, linearly search errlist for a record with the same
      loss mode and an allocation context equal at the configured
      resolution; extend it, or prepend a fresh record. */
   n_lossrecords = 0;
   errlist       = NULL;
   for (i = 0; i < lc_n_shadows; i++) {

      ExeContext* where = lc_shadows[i]->where;

      for (p = errlist; p != NULL; p = p->next) {
         if (p->loss_mode == lc_reachedness[i]
             && VG_(eq_ExeContext) ( MAC_(clo_leak_resolution),
                                     p->allocated_at,
                                     where) ) {
            break;
         }
      }
      if (p != NULL) {
         p->num_blocks  ++;
         p->total_bytes += lc_shadows[i]->size;
      } else {
         n_lossrecords ++;
         p = VG_(malloc)(sizeof(LossRecord));
         p->loss_mode    = lc_reachedness[i];
         p->allocated_at = where;
         p->total_bytes  = lc_shadows[i]->size;
         p->num_blocks   = 1;
         p->next         = errlist;
         errlist         = p;
      }
   }

   /* Print out the commoned-up blocks and collect summary stats. */
   blocks_leaked     = MAC_(bytes_leaked)     = 0;
   blocks_dubious    = MAC_(bytes_dubious)    = 0;
   blocks_reachable  = MAC_(bytes_reachable)  = 0;
   blocks_suppressed = MAC_(bytes_suppressed) = 0;

   /* Selection loop: on each pass pick the unprinted record with the
      smallest total_bytes (its num_blocks is zeroed afterwards to take
      it out of contention), so records are reported in increasing size
      order.  O(n_lossrecords^2), which is fine for typical counts. */
   for (i = 0; i < n_lossrecords; i++) {
      Bool        print_record;
      LossRecord* p_min = NULL;
      UInt        n_min = 0xFFFFFFFF;
      for (p = errlist; p != NULL; p = p->next) {
         if (p->num_blocks > 0 && p->total_bytes < n_min) {
            n_min = p->total_bytes;
            p_min = p;
         }
      }
      sk_assert(p_min != NULL);

      /* Ok to have tst==NULL;  it's only used if --gdb-attach=yes, and
         we disallow that when --leak-check=yes.

         Prints the error if not suppressed, unless it's reachable (Proper)
         and --show-reachable=no */

      print_record = ( MAC_(clo_show_reachable) || Proper != p_min->loss_mode );
      is_suppressed =
         VG_(unique_error) ( VG_(get_current_tid)(), LeakErr, (UInt)i+1,
                             (Char*)n_lossrecords, (void*) p_min,
                             p_min->allocated_at, print_record,
                             /*allow_GDB_attach*/False, /*count_error*/False );

      /* Suppression status takes precedence over the loss mode when
         tallying the summary. */
      if (is_suppressed) {
         blocks_suppressed      += p_min->num_blocks;
         MAC_(bytes_suppressed) += p_min->total_bytes;

      } else if (Unreached == p_min->loss_mode) {
         blocks_leaked      += p_min->num_blocks;
         MAC_(bytes_leaked) += p_min->total_bytes;

      } else if (Interior == p_min->loss_mode) {
         blocks_dubious      += p_min->num_blocks;
         MAC_(bytes_dubious) += p_min->total_bytes;

      } else if (Proper == p_min->loss_mode) {
         blocks_reachable      += p_min->num_blocks;
         MAC_(bytes_reachable) += p_min->total_bytes;

      } else {
         VG_(skin_panic)("generic_detect_memory_leaks: unknown loss mode");
      }
      p_min->num_blocks = 0;   /* mark as printed */
   }

   if (VG_(clo_verbosity) > 0) {
      VG_(message)(Vg_UserMsg, "");
      VG_(message)(Vg_UserMsg, "LEAK SUMMARY:");
      VG_(message)(Vg_UserMsg, "   definitely lost: %d bytes in %d blocks.",
                               MAC_(bytes_leaked), blocks_leaked );
      VG_(message)(Vg_UserMsg, "   possibly lost:   %d bytes in %d blocks.",
                               MAC_(bytes_dubious), blocks_dubious );
      VG_(message)(Vg_UserMsg, "   still reachable: %d bytes in %d blocks.",
                               MAC_(bytes_reachable), blocks_reachable );
      VG_(message)(Vg_UserMsg, "   suppressed:      %d bytes in %d blocks.",
                               MAC_(bytes_suppressed), blocks_suppressed );
      if (!MAC_(clo_show_reachable)) {
         VG_(message)(Vg_UserMsg,
            "Reachable blocks (those to which a pointer was found) are not shown.");
         VG_(message)(Vg_UserMsg,
            "To see them, rerun with: --show-reachable=yes");
      }
   }

   /* NOTE(review): the LossRecords chained on errlist are not freed
      here -- presumably acceptable because leak checks are rare, but
      possibly the error machinery retains the (void*)p_min pointers
      passed to VG_(unique_error); confirm before adding a free loop. */
   VG_(free) ( lc_shadows );
   VG_(free) ( lc_reachedness );
}
| 551 | |
| 552 | /*--------------------------------------------------------------------*/ |
| 553 | /*--- end mac_leakcheck.c ---*/ |
| 554 | /*--------------------------------------------------------------------*/ |
| 555 | |