sewardj | 7ce7166 | 2008-05-02 10:33:15 +0000 | [diff] [blame^] | 1 | |
| 2 | /*--------------------------------------------------------------------*/ |
| 3 | /*--- Management, printing, etc, of errors and suppressions. ---*/ |
| 4 | /*--- mc_errors.c ---*/ |
| 5 | /*--------------------------------------------------------------------*/ |
| 6 | |
| 7 | /* |
| 8 | This file is part of MemCheck, a heavyweight Valgrind tool for |
| 9 | detecting memory errors. |
| 10 | |
| 11 | Copyright (C) 2000-2008 Julian Seward |
| 12 | jseward@acm.org |
| 13 | |
| 14 | This program is free software; you can redistribute it and/or |
| 15 | modify it under the terms of the GNU General Public License as |
| 16 | published by the Free Software Foundation; either version 2 of the |
| 17 | License, or (at your option) any later version. |
| 18 | |
| 19 | This program is distributed in the hope that it will be useful, but |
| 20 | WITHOUT ANY WARRANTY; without even the implied warranty of |
| 21 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 22 | General Public License for more details. |
| 23 | |
| 24 | You should have received a copy of the GNU General Public License |
| 25 | along with this program; if not, write to the Free Software |
| 26 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA |
| 27 | 02111-1307, USA. |
| 28 | |
| 29 | The GNU General Public License is contained in the file COPYING. |
| 30 | */ |
| 31 | |
| 32 | #include "pub_tool_basics.h" |
| 33 | #include "pub_tool_aspacemgr.h" |
| 34 | #include "pub_tool_hashtable.h" // For mc_include.h |
| 35 | #include "pub_tool_libcbase.h" |
| 36 | #include "pub_tool_libcassert.h" |
| 37 | #include "pub_tool_libcprint.h" |
| 38 | #include "pub_tool_machine.h" |
| 39 | #include "pub_tool_mallocfree.h" |
| 40 | #include "pub_tool_options.h" |
| 41 | #include "pub_tool_oset.h" |
| 42 | #include "pub_tool_replacemalloc.h" |
| 43 | #include "pub_tool_tooliface.h" |
| 44 | #include "pub_tool_threadstate.h" |
| 45 | #include "pub_tool_oset.h" |
| 46 | #include "pub_tool_debuginfo.h" // VG_(get_dataname_and_offset) |
| 47 | |
| 48 | #include "mc_include.h" |
| 49 | #include "memcheck.h" /* for client requests */ |
| 50 | |
| 51 | |
| 52 | /*------------------------------------------------------------*/ |
| 53 | /*--- Error types ---*/ |
| 54 | /*------------------------------------------------------------*/ |
| 55 | |
/* See comment in mc_include.h.  Set to True by MC_(pp_Error) whenever
   a definedness (value/cond/regparam/uninit-memparam) error is
   printed; never reset here. */
Bool MC_(any_value_errors) = False;
| 58 | |
| 59 | |
// Different kinds of blocks, used by AddrInfo.Addr.Block.block_kind.
// NOTE(review): the 111 base value looks like a debugger-recognition
// aid (keeps these distinct from other small enums) -- confirm.
typedef enum {
   Block_Mallocd = 111,   // live heap block (malloc/new/new[]/custom)
   Block_Freed,           // heap block that has been freed
   Block_Mempool,         // a mempool anchor (client request)
   Block_MempoolChunk,    // a chunk allocated from a mempool
   Block_UserG            // user-defined block (client request)
} BlockKind;
| 68 | |
| 69 | /* ------------------ Addresses -------------------- */ |
| 70 | |
/* The classification of a faulting address.  Selects which member of
   the AddrInfo union (below) is valid. */
typedef
   enum {
      Addr_Undescribed, // as-yet unclassified
      Addr_Unknown,     // classification yielded nothing useful
      Addr_Block,       // in malloc'd/free'd block
      Addr_Stack,       // on a thread's stack
      Addr_DataSym,     // in a global data sym
      Addr_Variable,    // variable described by the debug info
      Addr_SectKind     // last-ditch classification attempt
   }
   AddrTag;
| 83 | |
typedef
   struct _AddrInfo
   AddrInfo;

/* Description of a faulting address: a tag plus the tag-selected
   union member.  Produced by the address-describing machinery below
   and consumed by mc_pp_AddrInfo. */
struct _AddrInfo {
   AddrTag tag;
   union {
      // As-yet unclassified.
      // NOTE(review): empty struct members are a GNU extension, not
      // standard C.
      struct { } Undescribed;

      // On a stack.
      struct {
         ThreadId tid;        // Which thread's stack?
      } Stack;

      // This covers heap blocks (normal and from mempools) and user-defined
      // blocks.
      struct {
         BlockKind   block_kind;
         Char*       block_desc;   // "block", "mempool" or user-defined
         SizeT       block_szB;    // size of the block, in bytes
         OffT        rwoffset;     // access offset within the block; may be
                                   // negative (access before the block)
         ExeContext* lastchange;   // where the block was allocated/freed
      } Block;

      // In a global .data symbol.  This holds the first 127 chars of
      // the symbol's name (zero terminated), plus an offset.
      struct {
         Char name[128];
         OffT offset;
      } DataSym;

      // Is described by Dwarf debug info.  Arbitrary strings.  Must
      // be the same length.
      struct {
         Char descr1[96];
         Char descr2[96];
      } Variable;

      // Could only narrow it down to be the PLT/GOT/etc of a given
      // object.  Better than nothing, perhaps.
      struct {
         Char objname[128];
         VgSectKind kind;
      } SectKind;

      // Classification yielded nothing useful.
      struct { } Unknown;

   } Addr;
};
| 135 | |
| 136 | /* ------------------ Errors ----------------------- */ |
| 137 | |
/* What kind of error it is.  Stored in the core-side Error's kind
   field (see VG_(get_error_kind)); selects the matching member of the
   MC_Error union below. */
typedef
   enum {
      Err_Value,          // use of an uninitialised value
      Err_Cond,           // conditional jump/move on uninitialised value
      Err_CoreMem,        // unaddressable bytes in a core operation
      Err_Addr,           // load/store at an unaddressable location
      Err_Jump,           // jump to an unaddressable location
      Err_RegParam,       // syscall register param undefined
      Err_MemParam,       // syscall memory param undefined/unaddressable
      Err_User,           // client-request check failed
      Err_Free,           // bad/double free
      Err_FreeMismatch,   // alloc/dealloc function mismatch
      Err_Overlap,        // overlapping src/dst in strcpy/memcpy/etc
      Err_Leak,           // memory leak
      Err_IllegalMempool  // bad mempool address
                          // (no trailing comma: keeps this enum valid
                          // C89 and consistent with the other enums
                          // in this file)
   }
   MC_ErrorTag;
| 156 | |
| 157 | |
typedef struct _MC_Error MC_Error;

/* Memcheck-specific payload attached to each core-side Error via the
   'extra' mechanism.  Which union member is valid is determined by
   the error's MC_ErrorTag, held in the core Error object. */
struct _MC_Error {
   // Nb: we don't need the tag here, as it's stored in the Error type! Yuk.
   //MC_ErrorTag tag;

   union {
      // Use of an undefined value:
      // - as a pointer in a load or store
      // - as a jump target
      struct {
         SizeT szB;   // size of value in bytes
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } Value;

      // Use of an undefined value in a conditional branch or move.
      struct {
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } Cond;

      // Addressability error in core (signal-handling) operation.
      // It would be good to get rid of this error kind, merge it with
      // another one somehow.
      struct {
      } CoreMem;

      // Use of an unaddressable memory location in a load or store.
      struct {
         Bool     isWrite;    // read or write?
         SizeT    szB;        // not used for exec (jump) errors
         Bool     maybe_gcc;  // True if just below %esp -- could be a gcc bug
         AddrInfo ai;
      } Addr;

      // Jump to an unaddressable memory location.
      struct {
         AddrInfo ai;
      } Jump;

      // System call register input contains undefined bytes.
      struct {
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } RegParam;

      // System call memory input contains undefined/unaddressable bytes
      struct {
         Bool     isAddrErr;  // Addressability or definedness error?
         AddrInfo ai;
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } MemParam;

      // Problem found from a client request like CHECK_MEM_IS_ADDRESSABLE.
      struct {
         Bool     isAddrErr;  // Addressability or definedness error?
         AddrInfo ai;
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } User;

      // Program tried to free() something that's not a heap block (this
      // covers double-frees). */
      struct {
         AddrInfo ai;
      } Free;

      // Program allocates heap block with one function
      // (malloc/new/new[]/custom) and deallocates with not the matching one.
      struct {
         AddrInfo ai;
      } FreeMismatch;

      // Call to strcpy, memcpy, etc, with overlapping blocks.
      struct {
         Addr src;   // Source block
         Addr dst;   // Destination block
         Int  szB;   // Size in bytes; 0 if unused.
                     // NOTE(review): narrower than the SizeT that
                     // MC_(record_overlap_error) receives -- very
                     // large sizes would be truncated here.
      } Overlap;

      // A memory leak.
      struct {
         UInt        n_this_record;
         UInt        n_total_records;
         LossRecord* lossRecord;
      } Leak;

      // A memory pool error.
      struct {
         AddrInfo ai;
      } IllegalMempool;

   } Err;
};
| 259 | |
| 260 | |
| 261 | /*------------------------------------------------------------*/ |
| 262 | /*--- Printing errors ---*/ |
| 263 | /*------------------------------------------------------------*/ |
| 264 | |
/* Print a description of address 'a' as classified in 'ai', in XML or
   plain-text form depending on VG_(clo_xml).  'maybe_gcc' is only
   consulted for Addr_Unknown, and means the access was just below the
   stack pointer (see the --workaround-gcc296-bugs machinery).
   Panics on Addr_Undescribed: callers must describe the address
   first.  NOTE(review): '%t' appears to be Valgrind's XML-escaping
   string specifier, and '%,lu' its comma-grouped number form --
   handled by VG_(message), not standard printf. */
static void mc_pp_AddrInfo ( Addr a, AddrInfo* ai, Bool maybe_gcc )
{
   HChar* xpre  = VG_(clo_xml) ? "  <auxwhat>" : " ";
   HChar* xpost = VG_(clo_xml) ? "</auxwhat>"  : "";

   switch (ai->tag) {
      case Addr_Unknown:
         if (maybe_gcc) {
            VG_(message)(Vg_UserMsg,
               "%sAddress 0x%llx is just below the stack ptr.  "
               "To suppress, use: --workaround-gcc296-bugs=yes%s",
               xpre, (ULong)a, xpost
            );
         } else {
            VG_(message)(Vg_UserMsg,
               "%sAddress 0x%llx "
               "is not stack'd, malloc'd or (recently) free'd%s",
               xpre, (ULong)a, xpost);
         }
         break;

      case Addr_Stack:
         VG_(message)(Vg_UserMsg,
                      "%sAddress 0x%llx is on thread %d's stack%s",
                      xpre, (ULong)a, ai->Addr.Stack.tid, xpost);
         break;

      case Addr_Block: {
         SizeT    block_szB = ai->Addr.Block.block_szB;
         OffT     rwoffset  = ai->Addr.Block.rwoffset;
         SizeT    delta;
         const    Char* relative;

         // Classify the access as before/after/inside the block.
         // 'rwoffset < 0' is checked first, so the signed/unsigned
         // comparison against block_szB below only sees rwoffset >= 0.
         if (rwoffset < 0) {
            delta    = (SizeT)(-rwoffset);
            relative = "before";
         } else if (rwoffset >= block_szB) {
            delta    = rwoffset - block_szB;
            relative = "after";
         } else {
            delta    = rwoffset;
            relative = "inside";
         }
         VG_(message)(Vg_UserMsg,
            "%sAddress 0x%lx is %,lu bytes %s a %s of size %,lu %s%s",
            xpre,
            a, delta, relative, ai->Addr.Block.block_desc,
            block_szB,
            ai->Addr.Block.block_kind==Block_Mallocd ? "alloc'd"
               : ai->Addr.Block.block_kind==Block_Freed ? "free'd"
                                                        : "client-defined",
            xpost);
         // Where the block was allocated (or freed, for Block_Freed).
         VG_(pp_ExeContext)(ai->Addr.Block.lastchange);
         break;
      }

      case Addr_DataSym:
         VG_(message)(Vg_UserMsg,
                      "%sAddress 0x%llx is %llu bytes "
                      "inside data symbol \"%t\"%s",
                      xpre,
                      (ULong)a,
                      (ULong)ai->Addr.DataSym.offset,
                      ai->Addr.DataSym.name,
                      xpost);
         break;

      case Addr_Variable:
         // Either line may be empty; only print the non-empty ones.
         if (ai->Addr.Variable.descr1[0] != '\0')
            VG_(message)(Vg_UserMsg, "%s%s%s",
                         xpre, ai->Addr.Variable.descr1, xpost);
         if (ai->Addr.Variable.descr2[0] != '\0')
            VG_(message)(Vg_UserMsg, "%s%s%s",
                         xpre, ai->Addr.Variable.descr2, xpost);
         break;

      case Addr_SectKind:
         VG_(message)(Vg_UserMsg,
                      "%sAddress 0x%llx is in the %t segment of %t%s",
                      xpre,
                      (ULong)a,
                      VG_(pp_SectKind)(ai->Addr.SectKind.kind),
                      ai->Addr.SectKind.objname,
                      xpost);
         break;

      default:
         VG_(tool_panic)("mc_pp_AddrInfo");
   }
}
| 355 | |
| 356 | static const HChar* str_leak_lossmode ( Reachedness lossmode ) |
| 357 | { |
| 358 | const HChar *loss = "?"; |
| 359 | switch (lossmode) { |
| 360 | case Unreached: loss = "definitely lost"; break; |
| 361 | case IndirectLeak: loss = "indirectly lost"; break; |
| 362 | case Interior: loss = "possibly lost"; break; |
| 363 | case Proper: loss = "still reachable"; break; |
| 364 | } |
| 365 | return loss; |
| 366 | } |
| 367 | |
| 368 | static const HChar* xml_leak_kind ( Reachedness lossmode ) |
| 369 | { |
| 370 | const HChar *loss = "?"; |
| 371 | switch (lossmode) { |
| 372 | case Unreached: loss = "Leak_DefinitelyLost"; break; |
| 373 | case IndirectLeak: loss = "Leak_IndirectlyLost"; break; |
| 374 | case Interior: loss = "Leak_PossiblyLost"; break; |
| 375 | case Proper: loss = "Leak_StillReachable"; break; |
| 376 | } |
| 377 | return loss; |
| 378 | } |
| 379 | |
/* Print the top line of an error report: in XML mode, a <kind> line
   followed by the message wrapped in <what>...</what>; in text mode,
   just the message.  The varargs are applied to 'format'.  The
   error's stack trace is printed afterwards.
   NOTE: 'buf' caps the decorated format string at 256 bytes; a longer
   format would be silently truncated before formatting. */
static void mc_pp_msg( Char* xml_name, Error* err, const HChar* format, ... )
{
   HChar* xpre  = VG_(clo_xml) ? "  <what>" : "";
   HChar* xpost = VG_(clo_xml) ? "</what>"  : "";
   Char buf[256];
   va_list vargs;

   if (VG_(clo_xml))
      VG_(message)(Vg_UserMsg, "  <kind>%s</kind>", xml_name);
   // Stick xpre and xpost on the front and back of the format string.
   VG_(snprintf)(buf, 256, "%s%s%s", xpre, format, xpost);
   va_start(vargs, format);
   VG_(vmessage) ( Vg_UserMsg, buf, vargs );
   va_end(vargs);
   VG_(pp_ExeContext)( VG_(get_error_where)(err) );
}
| 396 | |
/* Print the "Uninitialised value was created ..." trailer for an
   origin-tracked error.  'ec' is where the uninitialised data
   originated; 'okind' (one of the MC_OKIND_* values) says what kind
   of allocation created it. */
static void mc_pp_origin ( ExeContext* ec, UInt okind )
{
   HChar* src   = NULL;
   HChar* xpre  = VG_(clo_xml) ? "  <what>" : " ";
   HChar* xpost = VG_(clo_xml) ? "</what>"  : "";
   tl_assert(ec);

   switch (okind) {
      case MC_OKIND_STACK:   src = " by a stack allocation"; break;
      case MC_OKIND_HEAP:    src = " by a heap allocation"; break;
      case MC_OKIND_USER:    src = " by a client request"; break;
      case MC_OKIND_UNKNOWN: src = ""; break;  // non-NULL, so assert passes
   }
   tl_assert(src); /* guards against invalid 'okind' */

   if (VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg, "  <origin>");
   }

   VG_(message)(Vg_UserMsg, "%sUninitialised value was created%s%s",
                xpre, src, xpost);
   VG_(pp_ExeContext)( ec );
   if (VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg, "  </origin>");
   }
}
| 423 | |
/* Top-level error printer, called by the core for each error that is
   actually reported.  Dispatches on the core-side error kind; 'extra'
   carries the Memcheck-specific payload (see MC_Error above).  Also
   sets MC_(any_value_errors) when a definedness error is printed. */
void MC_(pp_Error) ( Error* err )
{
   MC_Error* extra = VG_(get_error_extra)(err);

   switch (VG_(get_error_kind)(err)) {
      case Err_CoreMem: {
         /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
         /* As of 2006-Dec-14, it's caused by unaddressable bytes in a
            signal handler frame.  --njn */
         mc_pp_msg("CoreMemError", err,
                   "%s contains unaddressable byte(s)",
                   VG_(get_error_string)(err));
         break;
      }

      case Err_Value:
         MC_(any_value_errors) = True;
         /* NOTE(review): the "1 ||" makes the first arm unconditional,
            so the "(otag %u)" variant below is currently dead code;
            origin info is instead shown via mc_pp_origin. */
         if (1 || extra->Err.Value.otag == 0) {
            mc_pp_msg("UninitValue", err,
                      "Use of uninitialised value of size %d",
                      extra->Err.Value.szB);
         } else {
            mc_pp_msg("UninitValue", err,
                      "Use of uninitialised value of size %d (otag %u)",
                      extra->Err.Value.szB, extra->Err.Value.otag);
         }
         if (extra->Err.Value.origin_ec)
            mc_pp_origin( extra->Err.Value.origin_ec,
                          extra->Err.Value.otag & 3 );
         break;

      case Err_Cond:
         MC_(any_value_errors) = True;
         /* Same dead-code situation as Err_Value above. */
         if (1 || extra->Err.Cond.otag == 0) {
            mc_pp_msg("UninitCondition", err,
                      "Conditional jump or move depends"
                      " on uninitialised value(s)");
         } else {
            mc_pp_msg("UninitCondition", err,
                      "Conditional jump or move depends"
                      " on uninitialised value(s) (otag %u)",
                      extra->Err.Cond.otag);
         }
         if (extra->Err.Cond.origin_ec)
            mc_pp_origin( extra->Err.Cond.origin_ec,
                          extra->Err.Cond.otag & 3 );
         break;

      case Err_RegParam:
         MC_(any_value_errors) = True;
         mc_pp_msg("SyscallParam", err,
                   "Syscall param %s contains uninitialised byte(s)",
                   VG_(get_error_string)(err));
         if (extra->Err.RegParam.origin_ec)
            mc_pp_origin( extra->Err.RegParam.origin_ec,
                          extra->Err.RegParam.otag & 3 );
         break;

      case Err_MemParam:
         // Only definedness (not addressability) errors count as
         // value errors.
         if (!extra->Err.MemParam.isAddrErr)
            MC_(any_value_errors) = True;
         mc_pp_msg("SyscallParam", err,
                   "Syscall param %s points to %s byte(s)",
                   VG_(get_error_string)(err),
                   ( extra->Err.MemParam.isAddrErr
                     ? "unaddressable" : "uninitialised" ));
         mc_pp_AddrInfo(VG_(get_error_address)(err),
                        &extra->Err.MemParam.ai, False);
         if (extra->Err.MemParam.origin_ec && !extra->Err.MemParam.isAddrErr)
            mc_pp_origin( extra->Err.MemParam.origin_ec,
                          extra->Err.MemParam.otag & 3 );
         break;

      case Err_User:
         if (!extra->Err.User.isAddrErr)
            MC_(any_value_errors) = True;
         mc_pp_msg("ClientCheck", err,
                   "%s byte(s) found during client check request",
                   ( extra->Err.User.isAddrErr
                     ? "Unaddressable" : "Uninitialised" ));
         mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.User.ai,
                        False);
         if (extra->Err.User.origin_ec && !extra->Err.User.isAddrErr)
            mc_pp_origin( extra->Err.User.origin_ec,
                          extra->Err.User.otag & 3 );
         break;

      case Err_Free:
         mc_pp_msg("InvalidFree", err,
                   "Invalid free() / delete / delete[]");
         mc_pp_AddrInfo(VG_(get_error_address)(err),
                        &extra->Err.Free.ai, False);
         break;

      case Err_FreeMismatch:
         mc_pp_msg("MismatchedFree", err,
                   "Mismatched free() / delete / delete []");
         mc_pp_AddrInfo(VG_(get_error_address)(err),
                        &extra->Err.FreeMismatch.ai, False);
         break;

      case Err_Addr:
         if (extra->Err.Addr.isWrite) {
            mc_pp_msg("InvalidWrite", err,
                      "Invalid write of size %d",
                      extra->Err.Addr.szB);
         } else {
            mc_pp_msg("InvalidRead", err,
                      "Invalid read of size %d",
                      extra->Err.Addr.szB);
         }
         // maybe_gcc enables the "just below the stack ptr" hint.
         mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.Addr.ai,
                        extra->Err.Addr.maybe_gcc);
         break;

      case Err_Jump:
         mc_pp_msg("InvalidJump", err,
                   "Jump to the invalid address stated on the next line");
         mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.Jump.ai,
                        False);
         break;

      case Err_Overlap:
         // szB == 0 means size was not recorded (2-arg routines).
         if (extra->Err.Overlap.szB == 0)
            mc_pp_msg("Overlap", err,
                      "Source and destination overlap in %s(%p, %p)",
                      VG_(get_error_string)(err),
                      extra->Err.Overlap.dst, extra->Err.Overlap.src);
         else
            mc_pp_msg("Overlap", err,
                      "Source and destination overlap in %s(%p, %p, %d)",
                      VG_(get_error_string)(err),
                      extra->Err.Overlap.dst, extra->Err.Overlap.src,
                      extra->Err.Overlap.szB);
         break;

      case Err_IllegalMempool:
         mc_pp_msg("InvalidMemPool", err,
                   "Illegal memory pool address");
         mc_pp_AddrInfo(VG_(get_error_address)(err),
                        &extra->Err.IllegalMempool.ai, False);
         break;

      case Err_Leak: {
         HChar* xpre  = VG_(clo_xml) ? "  <what>" : "";
         HChar* xpost = VG_(clo_xml) ? "</what>"  : "";
         UInt n_this_record   = extra->Err.Leak.n_this_record;
         UInt n_total_records = extra->Err.Leak.n_total_records;
         LossRecord* l        = extra->Err.Leak.lossRecord;

         if (VG_(clo_xml)) {
            VG_(message)(Vg_UserMsg, "  <kind>%t</kind>",
                         xml_leak_kind(l->loss_mode));
         } else {
            VG_(message)(Vg_UserMsg, "");
         }

         // Two report shapes: with and without indirectly-lost bytes.
         if (l->indirect_bytes) {
            VG_(message)(Vg_UserMsg,
               "%s%,lu (%,lu direct, %,lu indirect) bytes in %,u blocks"
               " are %s in loss record %,u of %,u%s",
               xpre,
               l->total_bytes + l->indirect_bytes,
               l->total_bytes, l->indirect_bytes, l->num_blocks,
               str_leak_lossmode(l->loss_mode), n_this_record, n_total_records,
               xpost
            );
            if (VG_(clo_xml)) {
               // Nb: don't put commas in these XML numbers
               VG_(message)(Vg_UserMsg, "  <leakedbytes>%lu</leakedbytes>",
                            l->total_bytes + l->indirect_bytes);
               VG_(message)(Vg_UserMsg, "  <leakedblocks>%u</leakedblocks>",
                            l->num_blocks);
            }
         } else {
            VG_(message)(
               Vg_UserMsg,
               "%s%,lu bytes in %,u blocks are %s in loss record %,u of %,u%s",
               xpre,
               l->total_bytes, l->num_blocks,
               str_leak_lossmode(l->loss_mode), n_this_record, n_total_records,
               xpost
            );
            if (VG_(clo_xml)) {
               VG_(message)(Vg_UserMsg, "  <leakedbytes>%d</leakedbytes>",
                            l->total_bytes);
               VG_(message)(Vg_UserMsg, "  <leakedblocks>%d</leakedblocks>",
                            l->num_blocks);
            }
         }
         VG_(pp_ExeContext)(l->allocated_at);
         break;
      }

      default:
         VG_(printf)("Error:\n  unknown Memcheck error code %d\n",
                     VG_(get_error_kind)(err));
         VG_(tool_panic)("unknown error code in mc_pp_Error)");
   }
}
| 624 | |
| 625 | /*------------------------------------------------------------*/ |
| 626 | /*--- Recording errors ---*/ |
| 627 | /*------------------------------------------------------------*/ |
| 628 | |
| 629 | /* These many bytes below %ESP are considered addressible if we're |
| 630 | doing the --workaround-gcc296-bugs hack. */ |
| 631 | #define VG_GCC296_BUG_STACK_SLOP 1024 |
| 632 | |
| 633 | /* Is this address within some small distance below %ESP? Used only |
| 634 | for the --workaround-gcc296-bugs kludge. */ |
| 635 | static Bool is_just_below_ESP( Addr esp, Addr aa ) |
| 636 | { |
| 637 | if (esp > aa && (esp - aa) <= VG_GCC296_BUG_STACK_SLOP) |
| 638 | return True; |
| 639 | else |
| 640 | return False; |
| 641 | } |
| 642 | |
| 643 | /* --- Called from generated and non-generated code --- */ |
| 644 | |
/* Record a load/store of szB bytes at unaddressable location 'a'.
   Called from both generated code and helpers.  May bail out without
   recording anything: for addresses in a client-requested ignore
   range, for the AIX page-zero / glink special cases, and (under
   --workaround-gcc296-bugs=yes) for accesses just below the stack
   pointer. */
void MC_(record_address_error) ( ThreadId tid, Addr a, Int szB,
                                 Bool isWrite )
{
   MC_Error extra;
   Bool     just_below_esp;

   if (MC_(in_ignored_range)(a))
      return;

#  if defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
   /* AIX zero-page handling.  On AIX, reads from page zero are,
      bizarrely enough, legitimate.  Writes to page zero aren't,
      though.  Since memcheck can't distinguish reads from writes, the
      best we can do is to 'act normal' and mark the A bits in the
      normal way as noaccess, but then hide any reads from that page
      that get reported here. */
   /* (a >= 0 is vacuously true for an unsigned Addr; the effective
      check is a < 4096 && a+szB <= 4096.) */
   if ((!isWrite) && a >= 0 && a < 4096 && a+szB <= 4096)
      return;

   /* Appalling AIX hack.  It suppresses reads done by glink
      fragments.  Getting rid of this would require figuring out
      somehow where the referenced data areas are (and their
      sizes). */
   if ((!isWrite) && szB == sizeof(Word)) {
      UInt i1, i2;
      UInt* pc = (UInt*)VG_(get_IP)(tid);
      if (sizeof(Word) == 4) {
         i1 = 0x800c0000; /* lwz r0,0(r12) */
         i2 = 0x804c0004; /* lwz r2,4(r12) */
      } else {
         i1 = 0xe80c0000; /* ld  r0,0(r12) */
         i2 = 0xe84c0008; /* ld  r2,8(r12) */
      }
      /* Match the two-word glink sequence at or just after the
         current IP.  NOTE(review): the pc[-1] read assumes the word
         before the IP is mapped. */
      if (pc[0] == i1 && pc[1] == i2) return;
      if (pc[0] == i2 && pc[-1] == i1) return;
   }
#  endif

   just_below_esp = is_just_below_ESP( VG_(get_SP)(tid), a );

   /* If this is caused by an access immediately below %ESP, and the
      user asks nicely, we just ignore it. */
   if (MC_(clo_workaround_gcc296_bugs) && just_below_esp)
      return;

   extra.Err.Addr.isWrite   = isWrite;
   extra.Err.Addr.szB       = szB;
   extra.Err.Addr.maybe_gcc = just_below_esp;
   extra.Err.Addr.ai.tag    = Addr_Undescribed;  // described at commit time
   VG_(maybe_record_error)( tid, Err_Addr, a, /*s*/NULL, &extra );
}
| 696 | |
| 697 | void MC_(record_value_error) ( ThreadId tid, Int szB, UInt otag ) |
| 698 | { |
| 699 | MC_Error extra; |
| 700 | tl_assert( MC_(clo_mc_level) >= 2 ); |
| 701 | if (otag > 0) |
| 702 | tl_assert( MC_(clo_mc_level) == 3 ); |
| 703 | extra.Err.Value.szB = szB; |
| 704 | extra.Err.Value.otag = otag; |
| 705 | extra.Err.Value.origin_ec = NULL; /* Filled in later */ |
| 706 | VG_(maybe_record_error)( tid, Err_Value, /*addr*/0, /*s*/NULL, &extra ); |
| 707 | } |
| 708 | |
| 709 | void MC_(record_cond_error) ( ThreadId tid, UInt otag ) |
| 710 | { |
| 711 | MC_Error extra; |
| 712 | tl_assert( MC_(clo_mc_level) >= 2 ); |
| 713 | if (otag > 0) |
| 714 | tl_assert( MC_(clo_mc_level) == 3 ); |
| 715 | extra.Err.Cond.otag = otag; |
| 716 | extra.Err.Cond.origin_ec = NULL; /* Filled in later */ |
| 717 | VG_(maybe_record_error)( tid, Err_Cond, /*addr*/0, /*s*/NULL, &extra ); |
| 718 | } |
| 719 | |
| 720 | /* --- Called from non-generated code --- */ |
| 721 | |
/* This is for memory errors in pthread functions, as opposed to pthread API
   errors which are found by the core.  'msg' names the operation and is
   printed by Err_CoreMem's case in MC_(pp_Error).
   NOTE(review): 'isAddrErr' is accepted but never used here -- the
   error is recorded the same way either way. */
void MC_(record_core_mem_error) ( ThreadId tid, Bool isAddrErr, Char* msg )
{
   VG_(maybe_record_error)( tid, Err_CoreMem, /*addr*/0, msg, /*extra*/NULL );
}
| 728 | |
| 729 | void MC_(record_regparam_error) ( ThreadId tid, Char* msg, UInt otag ) |
| 730 | { |
| 731 | MC_Error extra; |
| 732 | tl_assert(VG_INVALID_THREADID != tid); |
| 733 | if (otag > 0) |
| 734 | tl_assert( MC_(clo_mc_level) == 3 ); |
| 735 | extra.Err.RegParam.otag = otag; |
| 736 | extra.Err.RegParam.origin_ec = NULL; /* Filled in later */ |
| 737 | VG_(maybe_record_error)( tid, Err_RegParam, /*addr*/0, msg, &extra ); |
| 738 | } |
| 739 | |
| 740 | void MC_(record_memparam_error) ( ThreadId tid, Addr a, |
| 741 | Bool isAddrErr, Char* msg, UInt otag ) |
| 742 | { |
| 743 | MC_Error extra; |
| 744 | tl_assert(VG_INVALID_THREADID != tid); |
| 745 | if (!isAddrErr) |
| 746 | tl_assert( MC_(clo_mc_level) >= 2 ); |
| 747 | if (otag != 0) { |
| 748 | tl_assert( MC_(clo_mc_level) == 3 ); |
| 749 | tl_assert( !isAddrErr ); |
| 750 | } |
| 751 | extra.Err.MemParam.isAddrErr = isAddrErr; |
| 752 | extra.Err.MemParam.ai.tag = Addr_Undescribed; |
| 753 | extra.Err.MemParam.otag = otag; |
| 754 | extra.Err.MemParam.origin_ec = NULL; /* Filled in later */ |
| 755 | VG_(maybe_record_error)( tid, Err_MemParam, a, msg, &extra ); |
| 756 | } |
| 757 | |
/* Record a jump to the unaddressable location 'a'. */
void MC_(record_jump_error) ( ThreadId tid, Addr a )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.Jump.ai.tag = Addr_Undescribed;  // described at commit time
   VG_(maybe_record_error)( tid, Err_Jump, a, /*s*/NULL, &extra );
}
| 765 | |
/* Record a free()/delete of 'a', which is not (or no longer) a live
   heap block -- covers double frees too. */
void MC_(record_free_error) ( ThreadId tid, Addr a )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.Free.ai.tag = Addr_Undescribed;  // described at commit time
   VG_(maybe_record_error)( tid, Err_Free, a, /*s*/NULL, &extra );
}
| 773 | |
| 774 | void MC_(record_freemismatch_error) ( ThreadId tid, MC_Chunk* mc ) |
| 775 | { |
| 776 | MC_Error extra; |
| 777 | AddrInfo* ai = &extra.Err.FreeMismatch.ai; |
| 778 | tl_assert(VG_INVALID_THREADID != tid); |
| 779 | ai->tag = Addr_Block; |
| 780 | ai->Addr.Block.block_kind = Block_Mallocd; // Nb: Not 'Block_Freed' |
| 781 | ai->Addr.Block.block_desc = "block"; |
| 782 | ai->Addr.Block.block_szB = mc->szB; |
| 783 | ai->Addr.Block.rwoffset = 0; |
| 784 | ai->Addr.Block.lastchange = mc->where; |
| 785 | VG_(maybe_record_error)( tid, Err_FreeMismatch, mc->data, /*s*/NULL, |
| 786 | &extra ); |
| 787 | } |
| 788 | |
/* Record a mempool client request naming 'a', which is not a known
   mempool anchor address. */
void MC_(record_illegal_mempool_error) ( ThreadId tid, Addr a )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.IllegalMempool.ai.tag = Addr_Undescribed;  // described later
   VG_(maybe_record_error)( tid, Err_IllegalMempool, a, /*s*/NULL, &extra );
}
| 796 | |
/* Record overlapping src/dst blocks in a call to 'function'
   (strcpy, memcpy, etc).  szB is 0 for the 2-argument routines.
   NOTE(review): szB (SizeT) is stored into an Int field
   (Err.Overlap.szB), so very large sizes would be truncated. */
void MC_(record_overlap_error) ( ThreadId tid, Char* function,
                                 Addr src, Addr dst, SizeT szB )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.Overlap.src = src;
   extra.Err.Overlap.dst = dst;
   extra.Err.Overlap.szB = szB;
   VG_(maybe_record_error)(
      tid, Err_Overlap, /*addr*/0, /*s*/function, &extra );
}
| 808 | |
/* Record (and possibly print) leak record 'lossRecord', the
   n_this_record'th of n_total_records.  Uses VG_(unique_error) rather
   than the normal dedup path, keyed on the allocation context.
   Returns True iff the error was a new one.  Leak errors are neither
   GDB-attachable nor counted against the error limit. */
Bool MC_(record_leak_error) ( ThreadId tid, UInt n_this_record,
                              UInt n_total_records, LossRecord* lossRecord,
                              Bool print_record )
{
   MC_Error extra;
   extra.Err.Leak.n_this_record   = n_this_record;
   extra.Err.Leak.n_total_records = n_total_records;
   extra.Err.Leak.lossRecord      = lossRecord;
   return
   VG_(unique_error) ( tid, Err_Leak, /*Addr*/0, /*s*/NULL, &extra,
                       lossRecord->allocated_at, print_record,
                       /*allow_GDB_attach*/False, /*count_error*/False );
}
| 822 | |
/* Record a failed client check request (e.g. CHECK_MEM_IS_ADDRESSABLE
   / CHECK_MEM_IS_DEFINED) at address 'a'.  'isAddrErr' distinguishes
   addressability from definedness failures; 'otag' is the origin tag,
   which only accompanies definedness errors at mc-level 3. */
void MC_(record_user_error) ( ThreadId tid, Addr a,
                              Bool isAddrErr, UInt otag )
{
   MC_Error extra;
   if (otag != 0) {
      tl_assert(!isAddrErr);
      tl_assert( MC_(clo_mc_level) == 3 );
   }
   if (!isAddrErr) {
      tl_assert( MC_(clo_mc_level) >= 2 );
   }
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.User.isAddrErr = isAddrErr;
   extra.Err.User.ai.tag    = Addr_Undescribed;  // described at commit time
   extra.Err.User.otag      = otag;
   extra.Err.User.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_User, a, /*s*/NULL, &extra );
}
| 841 | |
| 842 | /*------------------------------------------------------------*/ |
| 843 | /*--- Other error operations ---*/ |
| 844 | /*------------------------------------------------------------*/ |
| 845 | |
/* Compare error contexts, to detect duplicates.  Note that if they
   are otherwise the same, the faulting addrs and associated rwoffsets
   are allowed to be different.  Returns True iff e1 and e2 should be
   treated as the same error for suppression/dedup purposes. */
Bool MC_(eq_Error) ( VgRes res, Error* e1, Error* e2 )
{
   MC_Error* extra1 = VG_(get_error_extra)(e1);
   MC_Error* extra2 = VG_(get_error_extra)(e2);

   /* Guaranteed by calling function */
   tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));

   switch (VG_(get_error_kind)(e1)) {
      case Err_CoreMem: {
         Char *e1s, *e2s;
         e1s = VG_(get_error_string)(e1);
         e2s = VG_(get_error_string)(e2);
         if (e1s == e2s) return True;   // fast path: same string object
         if (VG_STREQ(e1s, e2s)) return True;
         return False;
      }

      case Err_RegParam:
         return VG_STREQ(VG_(get_error_string)(e1), VG_(get_error_string)(e2));

      // Perhaps we should also check the addrinfo.akinds for equality.
      // That would result in more error reports, but only in cases where
      // a register contains uninitialised bytes and points to memory
      // containing uninitialised bytes.  Currently, the 2nd of those to be
      // detected won't be reported.  That is (nearly?) always the memory
      // error, which is good.
      case Err_MemParam:
         if (!VG_STREQ(VG_(get_error_string)(e1),
                       VG_(get_error_string)(e2))) return False;
         // fall through
      case Err_User:
         /* NOTE(review): for Err_MemParam this reads isAddrErr via the
            .User union member.  Both MemParam and User start with
            'Bool isAddrErr', so the layouts agree, but it does rely
            on that shared initial layout. */
         return ( extra1->Err.User.isAddrErr == extra2->Err.User.isAddrErr
                ? True : False );

      case Err_Free:
      case Err_FreeMismatch:
      case Err_Jump:
      case Err_IllegalMempool:
      case Err_Overlap:
      case Err_Cond:
         // Equality of kind (plus, implicitly, context) suffices.
         return True;

      case Err_Addr:
         return ( extra1->Err.Addr.szB == extra2->Err.Addr.szB
                ? True : False );

      case Err_Value:
         return ( extra1->Err.Value.szB == extra2->Err.Value.szB
                ? True : False );

      case Err_Leak:
         VG_(tool_panic)("Shouldn't get Err_Leak in mc_eq_Error,\n"
                         "since it's handled with VG_(unique_error)()!");

      default:
         VG_(printf)("Error:\n  unknown error code %d\n",
                     VG_(get_error_kind)(e1));
         VG_(tool_panic)("unknown error code in mc_eq_Error");
   }
}
| 910 | |
| 911 | /* Function used when searching MC_Chunk lists */ |
| 912 | static Bool addr_is_in_MC_Chunk(MC_Chunk* mc, Addr a) |
| 913 | { |
| 914 | // Nb: this is not quite right! It assumes that the heap block has |
| 915 | // a redzone of size MC_MALLOC_REDZONE_SZB. That's true for malloc'd |
| 916 | // blocks, but not necessarily true for custom-alloc'd blocks. So |
| 917 | // in some cases this could result in an incorrect description (eg. |
| 918 | // saying "12 bytes after block A" when really it's within block B. |
| 919 | // Fixing would require adding redzone size to MC_Chunks, though. |
| 920 | return VG_(addr_is_in_block)( a, mc->data, mc->szB, |
| 921 | MC_MALLOC_REDZONE_SZB ); |
| 922 | } |
| 923 | |
| 924 | // Forward declaration |
| 925 | static Bool client_block_maybe_describe( Addr a, AddrInfo* ai ); |
| 926 | |
| 927 | |
/* Describe an address as best you can, for error messages,
   putting the result in ai.

   Classification is attempted in a fixed priority order; the first
   match wins and the function returns immediately:
     1. client-described (MALLOCLIKE/mempool) blocks,
     2. recently freed heap blocks,
     3. currently live heap blocks,
     4. debuginfo variable descriptions,
     5. low-level data symbols,
     6. thread stacks,
     7. the object/section the address falls in.
   If nothing matches, ai->tag is set to Addr_Unknown.
   On entry ai->tag must be Addr_Undescribed. */
static void describe_addr ( Addr a, /*OUT*/AddrInfo* ai )
{
   MC_Chunk* mc;
   ThreadId tid;
   Addr stack_min, stack_max;
   VgSectKind sect;

   /* Caller must hand us a not-yet-filled-in AddrInfo. */
   tl_assert(Addr_Undescribed == ai->tag);

   /* Perhaps it's a user-def'd block? */
   if (client_block_maybe_describe( a, ai )) {
      return;
   }
   /* Search for a recently freed block which might bracket it. */
   mc = MC_(get_freed_list_head)();
   while (mc) {
      if (addr_is_in_MC_Chunk(mc, a)) {
         ai->tag = Addr_Block;
         ai->Addr.Block.block_kind = Block_Freed;
         ai->Addr.Block.block_desc = "block";
         ai->Addr.Block.block_szB = mc->szB;
         /* NOTE(review): offset computed in 32 bits; could wrap for
            very distant addresses on 64-bit targets -- confirm. */
         ai->Addr.Block.rwoffset = (Int)a - (Int)mc->data;
         ai->Addr.Block.lastchange = mc->where;
         return;
      }
      mc = mc->next;
   }
   /* Search for a currently malloc'd block which might bracket it. */
   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      if (addr_is_in_MC_Chunk(mc, a)) {
         ai->tag = Addr_Block;
         ai->Addr.Block.block_kind = Block_Mallocd;
         ai->Addr.Block.block_desc = "block";
         ai->Addr.Block.block_szB = mc->szB;
         ai->Addr.Block.rwoffset = (Int)a - (Int)mc->data;
         ai->Addr.Block.lastchange = mc->where;
         return;
      }
   }
   /* Perhaps the variable type/location data describes it? */
   /* Zero both description buffers first so they are always
      NUL-terminated, whatever get_data_description writes. */
   tl_assert(sizeof(ai->Addr.Variable.descr1)
             == sizeof(ai->Addr.Variable.descr2));
   VG_(memset)( &ai->Addr.Variable.descr1,
                0, sizeof(ai->Addr.Variable.descr1));
   VG_(memset)( &ai->Addr.Variable.descr2,
                0, sizeof(ai->Addr.Variable.descr2));
   if (VG_(get_data_description)(
             &ai->Addr.Variable.descr1[0],
             &ai->Addr.Variable.descr2[0],
             sizeof(ai->Addr.Variable.descr1)-1,
             a )) {
      ai->tag = Addr_Variable;
      /* The -1 size cap above must have preserved the final NUL. */
      tl_assert( ai->Addr.Variable.descr1
                    [ sizeof(ai->Addr.Variable.descr1)-1 ] == 0);
      tl_assert( ai->Addr.Variable.descr2
                    [ sizeof(ai->Addr.Variable.descr2)-1 ] == 0);
      return;
   }
   /* Have a look at the low level data symbols - perhaps it's in
      there. */
   VG_(memset)( &ai->Addr.DataSym.name,
                0, sizeof(ai->Addr.DataSym.name));
   if (VG_(get_datasym_and_offset)(
             a, &ai->Addr.DataSym.name[0],
             sizeof(ai->Addr.DataSym.name)-1,
             &ai->Addr.DataSym.offset )) {
      ai->tag = Addr_DataSym;
      tl_assert( ai->Addr.DataSym.name
                    [ sizeof(ai->Addr.DataSym.name)-1 ] == 0);
      return;
   }
   /* Perhaps it's on a thread's stack?  The redzone below stack_min
      is counted as part of the stack. */
   VG_(thread_stack_reset_iter)(&tid);
   while ( VG_(thread_stack_next)(&tid, &stack_min, &stack_max) ) {
      if (stack_min - VG_STACK_REDZONE_SZB <= a && a <= stack_max) {
         ai->tag = Addr_Stack;
         ai->Addr.Stack.tid = tid;
         return;
      }
   }
   /* last ditch attempt at classification */
   tl_assert( sizeof(ai->Addr.SectKind.objname) > 4 );
   VG_(memset)( &ai->Addr.SectKind.objname,
                0, sizeof(ai->Addr.SectKind.objname));
   /* Default object name, shown if seginfo_sect_kind fills nothing in. */
   VG_(strcpy)( ai->Addr.SectKind.objname, "???" );
   sect = VG_(seginfo_sect_kind)( &ai->Addr.SectKind.objname[0],
                                  sizeof(ai->Addr.SectKind.objname)-1, a);
   if (sect != Vg_SectUnknown) {
      ai->tag = Addr_SectKind;
      ai->Addr.SectKind.kind = sect;
      tl_assert( ai->Addr.SectKind.objname
                    [ sizeof(ai->Addr.SectKind.objname)-1 ] == 0);
      return;
   }
   /* Clueless ... */
   ai->tag = Addr_Unknown;
   return;
}
| 1029 | |
| 1030 | /* Fill in *origin_ec as specified by otag, or NULL it out if otag |
| 1031 | does not refer to a known origin. */ |
| 1032 | static void update_origin ( /*OUT*/ExeContext** origin_ec, |
| 1033 | UInt otag ) |
| 1034 | { |
| 1035 | UInt ecu = otag & ~3; |
| 1036 | *origin_ec = NULL; |
| 1037 | if (VG_(is_plausible_ECU)(ecu)) { |
| 1038 | *origin_ec = VG_(get_ExeContext_from_ECU)( ecu ); |
| 1039 | } |
| 1040 | } |
| 1041 | |
/* Updates the copy with address info if necessary (but not for all errors).

   Fills in the expensive-to-compute parts of the error's 'extra'
   (address descriptions via describe_addr, and origin ExeContexts via
   update_origin).  Always returns sizeof(MC_Error), the number of
   bytes of 'extra' to retain. */
UInt MC_(update_Error_extra)( Error* err )
{
   MC_Error* extra = VG_(get_error_extra)(err);

   switch (VG_(get_error_kind)(err)) {
   // These ones don't have addresses associated with them, and so don't
   // need any updating.
   case Err_CoreMem:
   //case Err_Value:
   //case Err_Cond:
   case Err_Overlap:
   // For Err_Leaks the returned size does not matter -- they are always
   // shown with VG_(unique_error)() so they 'extra' not copied. But
   // we make it consistent with the others.
   case Err_Leak:
      return sizeof(MC_Error);

   // For value errors, get the ExeContext corresponding to the
   // origin tag. Note that it is a kludge to assume that
   // a length-1 trace indicates a stack origin. FIXME.
   case Err_Value:
      update_origin( &extra->Err.Value.origin_ec,
                     extra->Err.Value.otag );
      return sizeof(MC_Error);
   case Err_Cond:
      update_origin( &extra->Err.Cond.origin_ec,
                     extra->Err.Cond.otag );
      return sizeof(MC_Error);
   case Err_RegParam:
      update_origin( &extra->Err.RegParam.origin_ec,
                     extra->Err.RegParam.otag );
      return sizeof(MC_Error);

   // These ones always involve a memory address.
   case Err_Addr:
      describe_addr ( VG_(get_error_address)(err),
                      &extra->Err.Addr.ai );
      return sizeof(MC_Error);
   case Err_MemParam:
      // Needs both an address description and an origin.
      describe_addr ( VG_(get_error_address)(err),
                      &extra->Err.MemParam.ai );
      update_origin( &extra->Err.MemParam.origin_ec,
                     extra->Err.MemParam.otag );
      return sizeof(MC_Error);
   case Err_Jump:
      describe_addr ( VG_(get_error_address)(err),
                      &extra->Err.Jump.ai );
      return sizeof(MC_Error);
   case Err_User:
      // Needs both an address description and an origin.
      describe_addr ( VG_(get_error_address)(err),
                      &extra->Err.User.ai );
      update_origin( &extra->Err.User.origin_ec,
                     extra->Err.User.otag );
      return sizeof(MC_Error);
   case Err_Free:
      describe_addr ( VG_(get_error_address)(err),
                      &extra->Err.Free.ai );
      return sizeof(MC_Error);
   case Err_IllegalMempool:
      describe_addr ( VG_(get_error_address)(err),
                      &extra->Err.IllegalMempool.ai );
      return sizeof(MC_Error);

   // Err_FreeMismatches have already had their address described; this is
   // possible because we have the MC_Chunk on hand when the error is
   // detected. However, the address may be part of a user block, and if so
   // we override the pre-determined description with a user block one.
   case Err_FreeMismatch: {
      tl_assert(extra && Block_Mallocd ==
                extra->Err.FreeMismatch.ai.Addr.Block.block_kind);
      // Return value deliberately ignored: if it's not a client block,
      // the existing Block_Mallocd description is kept.
      (void)client_block_maybe_describe( VG_(get_error_address)(err),
                                         &extra->Err.FreeMismatch.ai );
      return sizeof(MC_Error);
   }

   default: VG_(tool_panic)("mc_update_extra: bad errkind");
   }
}
| 1121 | |
// FIXME: does this perhaps want to live somewhere else
// in this file?
/* If 'a' lies inside a client-described block (one registered via the
   client-block machinery -- see MC_(get_ClientBlock_array)), fill in
   *ai with a description and return True; otherwise return False and
   leave *ai untouched.  A matching block whose start address is also
   registered as a mempool is described as a mempool (or, better, as a
   specific chunk within that mempool). */
static Bool client_block_maybe_describe( Addr a,
                                         /*OUT*/AddrInfo* ai )
{
   UInt i;

   CGenBlock* cgbs = NULL;
   UWord cgb_used = 0;
   MC_(get_ClientBlock_array)( &cgbs, &cgb_used );
   // A NULL array must mean there are no entries.
   if (cgbs == NULL)
      tl_assert(cgb_used == 0);

   /* Perhaps it's a general block ? */
   for (i = 0; i < cgb_used; i++) {
      // Skip empty/retired slots.
      if (cgbs[i].start == 0 && cgbs[i].size == 0)
         continue;
      // Use zero as the redzone for client blocks.
      if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size, 0)) {
         /* OK - maybe it's a mempool, too? */
         MC_Mempool* mp = VG_(HT_lookup)(MC_(mempool_list),
                                          (UWord)cgbs[i].start);
         if (mp != NULL) {
            // Prefer describing the exact chunk within the pool, if any.
            if (mp->chunks != NULL) {
               MC_Chunk* mc;
               VG_(HT_ResetIter)(mp->chunks);
               while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
                  if (addr_is_in_MC_Chunk(mc, a)) {
                     ai->tag = Addr_Block;
                     ai->Addr.Block.block_kind = Block_MempoolChunk;
                     ai->Addr.Block.block_desc = "block";
                     ai->Addr.Block.block_szB = mc->szB;
                     ai->Addr.Block.rwoffset = (Int)a - (Int)mc->data;
                     ai->Addr.Block.lastchange = mc->where;
                     return True;
                  }
               }
            }
            // In the pool but not in any chunk: describe the pool itself.
            ai->tag = Addr_Block;
            ai->Addr.Block.block_kind = Block_Mempool;
            ai->Addr.Block.block_desc = "mempool";
            ai->Addr.Block.block_szB = cgbs[i].size;
            ai->Addr.Block.rwoffset = (Int)(a) - (Int)(cgbs[i].start);
            ai->Addr.Block.lastchange = cgbs[i].where;
            return True;
         }
         // Plain client block; use the client-supplied description.
         ai->tag = Addr_Block;
         ai->Addr.Block.block_kind = Block_UserG;
         ai->Addr.Block.block_desc = cgbs[i].desc;
         ai->Addr.Block.block_szB = cgbs[i].size;
         ai->Addr.Block.rwoffset = (Int)(a) - (Int)(cgbs[i].start);
         ai->Addr.Block.lastchange = cgbs[i].where;
         return True;
      }
   }
   return False;
}
| 1179 | |
| 1180 | |
| 1181 | /*------------------------------------------------------------*/ |
| 1182 | /*--- Suppressions ---*/ |
| 1183 | /*------------------------------------------------------------*/ |
| 1184 | |
/* Kinds of suppression understood by Memcheck.  The names written in
   suppression files ("Param", "Addr4", "Value8", "Leak", ...) are
   mapped onto these by MC_(is_recognised_suppression). */
typedef
   enum {
      ParamSupp,     // Bad syscall params
      UserSupp,      // Errors arising from client-request checks
      CoreMemSupp,   // Memory errors in core (pthread ops, signal handling)

      // Undefined value errors of given size
      Value1Supp, Value2Supp, Value4Supp, Value8Supp, Value16Supp,

      // Undefined value error in conditional.
      CondSupp,

      // Unaddressable read/write attempt at given size
      Addr1Supp, Addr2Supp, Addr4Supp, Addr8Supp, Addr16Supp,

      JumpSupp,      // Jump to unaddressable target
      FreeSupp,      // Invalid or mismatching free
      OverlapSupp,   // Overlapping blocks in memcpy(), strcpy(), etc
      LeakSupp,      // Something to be suppressed in a leak check.
      MempoolSupp,   // Memory pool suppression.
   }
   MC_SuppKind;
| 1207 | |
| 1208 | Bool MC_(is_recognised_suppression) ( Char* name, Supp* su ) |
| 1209 | { |
| 1210 | SuppKind skind; |
| 1211 | |
| 1212 | if (VG_STREQ(name, "Param")) skind = ParamSupp; |
| 1213 | else if (VG_STREQ(name, "User")) skind = UserSupp; |
| 1214 | else if (VG_STREQ(name, "CoreMem")) skind = CoreMemSupp; |
| 1215 | else if (VG_STREQ(name, "Addr1")) skind = Addr1Supp; |
| 1216 | else if (VG_STREQ(name, "Addr2")) skind = Addr2Supp; |
| 1217 | else if (VG_STREQ(name, "Addr4")) skind = Addr4Supp; |
| 1218 | else if (VG_STREQ(name, "Addr8")) skind = Addr8Supp; |
| 1219 | else if (VG_STREQ(name, "Addr16")) skind = Addr16Supp; |
| 1220 | else if (VG_STREQ(name, "Jump")) skind = JumpSupp; |
| 1221 | else if (VG_STREQ(name, "Free")) skind = FreeSupp; |
| 1222 | else if (VG_STREQ(name, "Leak")) skind = LeakSupp; |
| 1223 | else if (VG_STREQ(name, "Overlap")) skind = OverlapSupp; |
| 1224 | else if (VG_STREQ(name, "Mempool")) skind = MempoolSupp; |
| 1225 | else if (VG_STREQ(name, "Cond")) skind = CondSupp; |
| 1226 | else if (VG_STREQ(name, "Value0")) skind = CondSupp; /* backwards compat */ |
| 1227 | else if (VG_STREQ(name, "Value1")) skind = Value1Supp; |
| 1228 | else if (VG_STREQ(name, "Value2")) skind = Value2Supp; |
| 1229 | else if (VG_STREQ(name, "Value4")) skind = Value4Supp; |
| 1230 | else if (VG_STREQ(name, "Value8")) skind = Value8Supp; |
| 1231 | else if (VG_STREQ(name, "Value16")) skind = Value16Supp; |
| 1232 | else |
| 1233 | return False; |
| 1234 | |
| 1235 | VG_(set_supp_kind)(su, skind); |
| 1236 | return True; |
| 1237 | } |
| 1238 | |
| 1239 | Bool MC_(read_extra_suppression_info) ( Int fd, Char* buf, |
| 1240 | Int nBuf, Supp *su ) |
| 1241 | { |
| 1242 | Bool eof; |
| 1243 | |
| 1244 | if (VG_(get_supp_kind)(su) == ParamSupp) { |
| 1245 | eof = VG_(get_line) ( fd, buf, nBuf ); |
| 1246 | if (eof) return False; |
| 1247 | VG_(set_supp_string)(su, VG_(strdup)(buf)); |
| 1248 | } |
| 1249 | return True; |
| 1250 | } |
| 1251 | |
| 1252 | Bool MC_(error_matches_suppression) ( Error* err, Supp* su ) |
| 1253 | { |
| 1254 | Int su_szB; |
| 1255 | MC_Error* extra = VG_(get_error_extra)(err); |
| 1256 | ErrorKind ekind = VG_(get_error_kind )(err); |
| 1257 | |
| 1258 | switch (VG_(get_supp_kind)(su)) { |
| 1259 | case ParamSupp: |
| 1260 | return ((ekind == Err_RegParam || ekind == Err_MemParam) |
| 1261 | && VG_STREQ(VG_(get_error_string)(err), |
| 1262 | VG_(get_supp_string)(su))); |
| 1263 | |
| 1264 | case UserSupp: |
| 1265 | return (ekind == Err_User); |
| 1266 | |
| 1267 | case CoreMemSupp: |
| 1268 | return (ekind == Err_CoreMem |
| 1269 | && VG_STREQ(VG_(get_error_string)(err), |
| 1270 | VG_(get_supp_string)(su))); |
| 1271 | |
| 1272 | case Value1Supp: su_szB = 1; goto value_case; |
| 1273 | case Value2Supp: su_szB = 2; goto value_case; |
| 1274 | case Value4Supp: su_szB = 4; goto value_case; |
| 1275 | case Value8Supp: su_szB = 8; goto value_case; |
| 1276 | case Value16Supp:su_szB =16; goto value_case; |
| 1277 | value_case: |
| 1278 | return (ekind == Err_Value && extra->Err.Value.szB == su_szB); |
| 1279 | |
| 1280 | case CondSupp: |
| 1281 | return (ekind == Err_Cond); |
| 1282 | |
| 1283 | case Addr1Supp: su_szB = 1; goto addr_case; |
| 1284 | case Addr2Supp: su_szB = 2; goto addr_case; |
| 1285 | case Addr4Supp: su_szB = 4; goto addr_case; |
| 1286 | case Addr8Supp: su_szB = 8; goto addr_case; |
| 1287 | case Addr16Supp:su_szB =16; goto addr_case; |
| 1288 | addr_case: |
| 1289 | return (ekind == Err_Addr && extra->Err.Addr.szB == su_szB); |
| 1290 | |
| 1291 | case JumpSupp: |
| 1292 | return (ekind == Err_Jump); |
| 1293 | |
| 1294 | case FreeSupp: |
| 1295 | return (ekind == Err_Free || ekind == Err_FreeMismatch); |
| 1296 | |
| 1297 | case OverlapSupp: |
| 1298 | return (ekind == Err_Overlap); |
| 1299 | |
| 1300 | case LeakSupp: |
| 1301 | return (ekind == Err_Leak); |
| 1302 | |
| 1303 | case MempoolSupp: |
| 1304 | return (ekind == Err_IllegalMempool); |
| 1305 | |
| 1306 | default: |
| 1307 | VG_(printf)("Error:\n" |
| 1308 | " unknown suppression type %d\n", |
| 1309 | VG_(get_supp_kind)(su)); |
| 1310 | VG_(tool_panic)("unknown suppression type in " |
| 1311 | "MC_(error_matches_suppression)"); |
| 1312 | } |
| 1313 | } |
| 1314 | |
| 1315 | Char* MC_(get_error_name) ( Error* err ) |
| 1316 | { |
| 1317 | switch (VG_(get_error_kind)(err)) { |
| 1318 | case Err_RegParam: return "Param"; |
| 1319 | case Err_MemParam: return "Param"; |
| 1320 | case Err_User: return "User"; |
| 1321 | case Err_FreeMismatch: return "Free"; |
| 1322 | case Err_IllegalMempool: return "Mempool"; |
| 1323 | case Err_Free: return "Free"; |
| 1324 | case Err_Jump: return "Jump"; |
| 1325 | case Err_CoreMem: return "CoreMem"; |
| 1326 | case Err_Overlap: return "Overlap"; |
| 1327 | case Err_Leak: return "Leak"; |
| 1328 | case Err_Cond: return "Cond"; |
| 1329 | case Err_Addr: { |
| 1330 | MC_Error* extra = VG_(get_error_extra)(err); |
| 1331 | switch ( extra->Err.Addr.szB ) { |
| 1332 | case 1: return "Addr1"; |
| 1333 | case 2: return "Addr2"; |
| 1334 | case 4: return "Addr4"; |
| 1335 | case 8: return "Addr8"; |
| 1336 | case 16: return "Addr16"; |
| 1337 | default: VG_(tool_panic)("unexpected size for Addr"); |
| 1338 | } |
| 1339 | } |
| 1340 | case Err_Value: { |
| 1341 | MC_Error* extra = VG_(get_error_extra)(err); |
| 1342 | switch ( extra->Err.Value.szB ) { |
| 1343 | case 1: return "Value1"; |
| 1344 | case 2: return "Value2"; |
| 1345 | case 4: return "Value4"; |
| 1346 | case 8: return "Value8"; |
| 1347 | case 16: return "Value16"; |
| 1348 | default: VG_(tool_panic)("unexpected size for Value"); |
| 1349 | } |
| 1350 | } |
| 1351 | default: VG_(tool_panic)("get_error_name: unexpected type"); |
| 1352 | } |
| 1353 | } |
| 1354 | |
| 1355 | void MC_(print_extra_suppression_info) ( Error* err ) |
| 1356 | { |
| 1357 | ErrorKind ekind = VG_(get_error_kind )(err); |
| 1358 | if (Err_RegParam == ekind || Err_MemParam == ekind) { |
| 1359 | VG_(printf)(" %s\n", VG_(get_error_string)(err)); |
| 1360 | } |
| 1361 | } |
| 1362 | |
| 1363 | |
| 1364 | /*--------------------------------------------------------------------*/ |
| 1365 | /*--- end mc_errors.c ---*/ |
| 1366 | /*--------------------------------------------------------------------*/ |