/*--------------------------------------------------------------------*/
/*--- Code that is shared between MemCheck and AddrCheck.          ---*/
/*---                                                  mc_common.h ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind skin for
   detecting memory errors, and AddrCheck, a lightweight Valgrind skin
   for detecting memory errors.

   Copyright (C) 2000-2002 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#ifndef __MC_COMMON_H
#define __MC_COMMON_H

#include "vg_skin.h"
#include "mc_constants.h"

/*------------------------------------------------------------*/
/*--- Errors and suppressions                              ---*/
/*------------------------------------------------------------*/

/* The classification of a faulting address. */
typedef
   enum { Undescribed,  /* as-yet unclassified */
          Stack,
          Unknown,      /* classification yielded nothing useful */
          Freed, Mallocd,
          UserG         /* in a user-defined block; Addrcheck & Memcheck only */
        }
   AddrKind;

/* Records info about a faulting address. */
typedef
   struct {
      /* ALL */
      AddrKind akind;
      /* Freed, Mallocd */
      Int blksize;
      /* Freed, Mallocd */
      Int rwoffset;
      /* Freed, Mallocd */
      ExeContext* lastchange;
      /* Stack */
      ThreadId stack_tid;
      /* True if the address is just below %esp -- could be a gcc bug. */
      Bool maybe_gcc;
   }
   AddrInfo;

typedef
   enum {
      /* Bad syscall params */
      ParamSupp,
      /* Memory errors in core (pthread ops, signal handling) */
      CoreMemSupp,
      /* Use of invalid values of given size (MemCheck only) */
      Value0Supp, Value1Supp, Value2Supp, Value4Supp, Value8Supp,
      /* Invalid read/write attempt at given size */
      Addr1Supp, Addr2Supp, Addr4Supp, Addr8Supp,
      /* Invalid or mismatching free */
      FreeSupp,
      /* Something to be suppressed in a leak check. */
      LeakSupp
   }
   MemCheckSuppKind;

/* What kind of error it is. */
typedef
   enum { ValueErr,     /* MemCheck only */
          CoreMemErr,
          AddrErr,
          ParamErr, UserErr,   /* behaves like an anonymous ParamErr */
          FreeErr, FreeMismatchErr
        }
   MemCheckErrorKind;

/* What kind of memory access is involved in the error? */
typedef
   enum { ReadAxs, WriteAxs, ExecAxs }
   AxsKind;

/* Extra context for memory errors */
typedef
   struct {
      /* AddrErr */
      AxsKind axskind;
      /* AddrErr, ValueErr */
      Int size;
      /* AddrErr, FreeErr, FreeMismatchErr, ParamErr, UserErr */
      AddrInfo addrinfo;
      /* ParamErr, UserErr, CoreMemErr */
      Bool isWrite;
   }
   MemCheckError;

/*------------------------------------------------------------*/
/*--- Profiling of skins and memory events                 ---*/
/*------------------------------------------------------------*/

typedef
   enum {
      VgpCheckMem = VgpFini+1,
      VgpSetMem,
      VgpESPAdj
   }
   VgpSkinCC;

/* Define to collect detailed performance info. */
/* #define VG_PROFILE_MEMORY */

#ifdef VG_PROFILE_MEMORY
#  define N_PROF_EVENTS 150

extern UInt MC_(event_ctr)[N_PROF_EVENTS];

#  define PROF_EVENT(ev)                                  \
   do { sk_assert((ev) >= 0 && (ev) < N_PROF_EVENTS);     \
        MC_(event_ctr)[ev]++;                             \
   } while (False)

#else

#  define PROF_EVENT(ev) /* */

#endif   /* VG_PROFILE_MEMORY */
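
/* Illustrative use (a sketch only; the event number here is arbitrary --
   real numbers are assigned where the helpers are defined):

      PROF_EVENT(66);   -- count one more checked access

   With VG_PROFILE_MEMORY defined this increments MC_(event_ctr)[66];
   without it, the macro compiles away to nothing. */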

/*------------------------------------------------------------*/
/*--- V and A bits                                         ---*/
/*------------------------------------------------------------*/

#define IS_DISTINGUISHED_SM(smap) \
   ((smap) == &distinguished_secondary_map)

#define ENSURE_MAPPABLE(addr,caller)                                 \
   do {                                                              \
      if (IS_DISTINGUISHED_SM(primary_map[(addr) >> 16])) {          \
         primary_map[(addr) >> 16] = alloc_secondary_map(caller);    \
         /* VG_(printf)("new 2map because of %p\n", addr); */        \
      }                                                              \
   } while(0)
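
/* Shadow memory is a two-level map: primary_map has one entry per
   64KB chunk of address space, and untouched chunks all point at a
   single shared "distinguished" secondary map.  ENSURE_MAPPABLE swaps
   in a private, writable secondary map the first time a chunk is
   about to be modified. */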

#define BITARR_SET(aaa_p,iii_p)                       \
   do {                                               \
      UInt   iii = (UInt)iii_p;                       \
      UChar* aaa = (UChar*)aaa_p;                     \
      aaa[ iii >> 3 ] |= (1 << (iii & 7));            \
   } while (0)

#define BITARR_CLEAR(aaa_p,iii_p)                     \
   do {                                               \
      UInt   iii = (UInt)iii_p;                       \
      UChar* aaa = (UChar*)aaa_p;                     \
      aaa[ iii >> 3 ] &= ~(1 << (iii & 7));           \
   } while (0)

#define BITARR_TEST(aaa_p,iii_p)                      \
      (0 != (((UChar*)aaa_p)[ ((UInt)iii_p) >> 3 ]    \
               & (1 << (((UInt)iii_p) & 7))))
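
/* Usage sketch (illustrative only; 'abits' stands for the A-bit array
   of one secondary map -- 8192 bytes covering 64K addresses at one
   bit each, a set bit marking the address invalid):

      UChar abits[8192];
      BITARR_SET   ( abits, a & 0xFFFF );   -- mark address 'a' invalid
      if (BITARR_TEST( abits, a & 0xFFFF ))
         ... report an addressing error ...
      BITARR_CLEAR ( abits, a & 0xFFFF );   -- and make it valid again
*/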

#define VGM_BIT_VALID       0
#define VGM_BIT_INVALID     1

#define VGM_NIBBLE_VALID    0
#define VGM_NIBBLE_INVALID  0xF

#define VGM_BYTE_VALID      0
#define VGM_BYTE_INVALID    0xFF

#define VGM_WORD_VALID      0
#define VGM_WORD_INVALID    0xFFFFFFFF

#define VGM_EFLAGS_VALID    0xFFFFFFFE
#define VGM_EFLAGS_INVALID  0xFFFFFFFF   /* not used */
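
/* So, for example, a 32-bit word whose two low-addressed bytes are
   valid and whose two high-addressed bytes are undefined carries the
   V-word 0xFFFF0000, assuming the natural little-endian packing of
   the four byte V-values into a word V-value. */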

/*------------------------------------------------------------*/
/*--- Command line options + defaults                      ---*/
/*------------------------------------------------------------*/

/* Most of these are shared between MemCheck and AddrCheck; the last
   two are MemCheck-only (but live here anyway, for simplicity). */

/* Allow loads from partially-valid addresses?  default: YES */
extern Bool MC_(clo_partial_loads_ok);

/* Max volume of the freed blocks queue. */
extern Int MC_(clo_freelist_vol);

/* Do leak check at exit?  default: NO */
extern Bool MC_(clo_leak_check);

/* How closely should we compare ExeContexts in leak records? default: 2 */
extern VgRes MC_(clo_leak_resolution);

/* In leak check, show reachable-but-not-freed blocks?  default: NO */
extern Bool MC_(clo_show_reachable);

/* Assume accesses immediately below %esp are due to gcc-2.96 bugs.
   default: NO */
extern Bool MC_(clo_workaround_gcc296_bugs);

/* DEBUG: clean up instrumented code?  default: YES */
extern Bool MC_(clo_cleanup);

/* When instrumenting, omit some checks if tell-tale literals for
   inlined strlen() are visible in the basic block.  default: YES */
extern Bool MC_(clo_avoid_strlen_errors);

extern Bool MC_(process_common_cmd_line_option)(Char* arg);

/*------------------------------------------------------------*/
/*--- Functions                                            ---*/
/*------------------------------------------------------------*/

extern void        MC_(set_where) ( ShadowChunk* sc, ExeContext* ec );
extern ExeContext* MC_(get_where) ( ShadowChunk* sc );

extern void MC_(pp_AddrInfo) ( Addr a, AddrInfo* ai );

extern void MC_(clear_MemCheckError) ( MemCheckError* err_extra );

extern void MC_(record_address_error)     ( Addr a, Int size, Bool isWrite );
extern void MC_(record_core_mem_error)    ( ThreadState* tst, Bool isWrite,
                                            Char* s );
extern void MC_(record_param_error)       ( ThreadState* tst, Addr a,
                                            Bool isWriteLack, Char* msg );
extern void MC_(record_jump_error)        ( ThreadState* tst, Addr a );
extern void MC_(record_free_error)        ( ThreadState* tst, Addr a );
extern void MC_(record_freemismatch_error)( ThreadState* tst, Addr a );

extern void MC_(init_prof_mem) ( void );
extern void MC_(done_prof_mem) ( void );

extern Int          MC_(count_freelist)  ( void ) __attribute__ ((unused));
extern void         MC_(freelist_sanity) ( void ) __attribute__ ((unused));
extern ShadowChunk* MC_(any_matching_freed_ShadowChunks)
                       ( Bool (*p)(ShadowChunk*) );

extern __attribute__((regparm(1))) void MC_(new_mem_stack_4)  ( Addr old_ESP );
extern __attribute__((regparm(1))) void MC_(die_mem_stack_4)  ( Addr old_ESP );
extern __attribute__((regparm(1))) void MC_(new_mem_stack_8)  ( Addr old_ESP );
extern __attribute__((regparm(1))) void MC_(die_mem_stack_8)  ( Addr old_ESP );
extern __attribute__((regparm(1))) void MC_(new_mem_stack_12) ( Addr old_ESP );
extern __attribute__((regparm(1))) void MC_(die_mem_stack_12) ( Addr old_ESP );
extern __attribute__((regparm(1))) void MC_(new_mem_stack_16) ( Addr old_ESP );
extern __attribute__((regparm(1))) void MC_(die_mem_stack_16) ( Addr old_ESP );
extern __attribute__((regparm(1))) void MC_(new_mem_stack_32) ( Addr old_ESP );
extern __attribute__((regparm(1))) void MC_(die_mem_stack_32) ( Addr old_ESP );
extern void MC_(die_mem_stack) ( Addr a, UInt len );
extern void MC_(new_mem_stack) ( Addr a, UInt len );


/*------------------------------------------------------------*/
/*--- Stack pointer adjustment                             ---*/
/*------------------------------------------------------------*/

/* Some noble preprocessor abuse, to enable Memcheck and Addrcheck to
   share this code without calling the same functions.

   Note that this code is executed very frequently and must be highly
   optimised, which is why I resort to the preprocessor to achieve the
   factoring, rather than eg. using function pointers.
*/

#define ESP_UPDATE_HANDLERS(ALIGNED4_NEW,  ALIGNED4_DIE,              \
                            ALIGNED8_NEW,  ALIGNED8_DIE,              \
                            UNALIGNED_NEW, UNALIGNED_DIE)             \
                                                                      \
void __attribute__((regparm(1))) MC_(new_mem_stack_4)(Addr new_ESP)  \
{                                                                     \
   PROF_EVENT(110);                                                   \
   if (IS_ALIGNED4_ADDR(new_ESP)) {                                   \
      ALIGNED4_NEW  ( new_ESP );                                      \
   } else {                                                           \
      UNALIGNED_NEW ( new_ESP, 4 );                                   \
   }                                                                  \
}                                                                     \
                                                                      \
void __attribute__((regparm(1))) MC_(die_mem_stack_4)(Addr new_ESP)  \
{                                                                     \
   PROF_EVENT(120);                                                   \
   if (IS_ALIGNED4_ADDR(new_ESP)) {                                   \
      ALIGNED4_DIE  ( new_ESP-4 );                                    \
   } else {                                                           \
      UNALIGNED_DIE ( new_ESP-4, 4 );                                 \
   }                                                                  \
}                                                                     \
                                                                      \
void __attribute__((regparm(1))) MC_(new_mem_stack_8)(Addr new_ESP)  \
{                                                                     \
   PROF_EVENT(111);                                                   \
   if (IS_ALIGNED8_ADDR(new_ESP)) {                                   \
      ALIGNED8_NEW  ( new_ESP );                                      \
   } else if (IS_ALIGNED4_ADDR(new_ESP)) {                            \
      ALIGNED4_NEW  ( new_ESP   );                                    \
      ALIGNED4_NEW  ( new_ESP+4 );                                    \
   } else {                                                           \
      UNALIGNED_NEW ( new_ESP, 8 );                                   \
   }                                                                  \
}                                                                     \
                                                                      \
void __attribute__((regparm(1))) MC_(die_mem_stack_8)(Addr new_ESP)  \
{                                                                     \
   PROF_EVENT(121);                                                   \
   if (IS_ALIGNED8_ADDR(new_ESP)) {                                   \
      ALIGNED8_DIE  ( new_ESP-8 );                                    \
   } else if (IS_ALIGNED4_ADDR(new_ESP)) {                            \
      ALIGNED4_DIE  ( new_ESP-8 );                                    \
      ALIGNED4_DIE  ( new_ESP-4 );                                    \
   } else {                                                           \
      UNALIGNED_DIE ( new_ESP-8, 8 );                                 \
   }                                                                  \
}                                                                     \
                                                                      \
void __attribute__((regparm(1))) MC_(new_mem_stack_12)(Addr new_ESP) \
{                                                                     \
   PROF_EVENT(112);                                                   \
   if (IS_ALIGNED8_ADDR(new_ESP)) {                                   \
      ALIGNED8_NEW  ( new_ESP   );                                    \
      ALIGNED4_NEW  ( new_ESP+8 );                                    \
   } else if (IS_ALIGNED4_ADDR(new_ESP)) {                            \
      ALIGNED4_NEW  ( new_ESP   );                                    \
      ALIGNED8_NEW  ( new_ESP+4 );                                    \
   } else {                                                           \
      UNALIGNED_NEW ( new_ESP, 12 );                                  \
   }                                                                  \
}                                                                     \
                                                                      \
void __attribute__((regparm(1))) MC_(die_mem_stack_12)(Addr new_ESP) \
{                                                                     \
   PROF_EVENT(122);                                                   \
   /* Note the -12 in the test */                                     \
   if (IS_ALIGNED8_ADDR(new_ESP-12)) {                                \
      ALIGNED8_DIE  ( new_ESP-12 );                                   \
      ALIGNED4_DIE  ( new_ESP-4  );                                   \
   } else if (IS_ALIGNED4_ADDR(new_ESP)) {                            \
      ALIGNED4_DIE  ( new_ESP-12 );                                   \
      ALIGNED8_DIE  ( new_ESP-8  );                                   \
   } else {                                                           \
      UNALIGNED_DIE ( new_ESP-12, 12 );                               \
   }                                                                  \
}                                                                     \
                                                                      \
void __attribute__((regparm(1))) MC_(new_mem_stack_16)(Addr new_ESP) \
{                                                                     \
   PROF_EVENT(113);                                                   \
   if (IS_ALIGNED8_ADDR(new_ESP)) {                                   \
      ALIGNED8_NEW  ( new_ESP   );                                    \
      ALIGNED8_NEW  ( new_ESP+8 );                                    \
   } else if (IS_ALIGNED4_ADDR(new_ESP)) {                            \
      ALIGNED4_NEW  ( new_ESP    );                                   \
      ALIGNED8_NEW  ( new_ESP+4  );                                   \
      ALIGNED4_NEW  ( new_ESP+12 );                                   \
   } else {                                                           \
      UNALIGNED_NEW ( new_ESP, 16 );                                  \
   }                                                                  \
}                                                                     \
                                                                      \
void __attribute__((regparm(1))) MC_(die_mem_stack_16)(Addr new_ESP) \
{                                                                     \
   PROF_EVENT(123);                                                   \
   if (IS_ALIGNED8_ADDR(new_ESP)) {                                   \
      ALIGNED8_DIE  ( new_ESP-16 );                                   \
      ALIGNED8_DIE  ( new_ESP-8  );                                   \
   } else if (IS_ALIGNED4_ADDR(new_ESP)) {                            \
      ALIGNED4_DIE  ( new_ESP-16 );                                   \
      ALIGNED8_DIE  ( new_ESP-12 );                                   \
      ALIGNED4_DIE  ( new_ESP-4  );                                   \
   } else {                                                           \
      UNALIGNED_DIE ( new_ESP-16, 16 );                               \
   }                                                                  \
}                                                                     \
                                                                      \
void __attribute__((regparm(1))) MC_(new_mem_stack_32)(Addr new_ESP) \
{                                                                     \
   PROF_EVENT(114);                                                   \
   if (IS_ALIGNED8_ADDR(new_ESP)) {                                   \
      ALIGNED8_NEW  ( new_ESP    );                                   \
      ALIGNED8_NEW  ( new_ESP+8  );                                   \
      ALIGNED8_NEW  ( new_ESP+16 );                                   \
      ALIGNED8_NEW  ( new_ESP+24 );                                   \
   } else if (IS_ALIGNED4_ADDR(new_ESP)) {                            \
      ALIGNED4_NEW  ( new_ESP    );                                   \
      ALIGNED8_NEW  ( new_ESP+4  );                                   \
      ALIGNED8_NEW  ( new_ESP+12 );                                   \
      ALIGNED8_NEW  ( new_ESP+20 );                                   \
      ALIGNED4_NEW  ( new_ESP+28 );                                   \
   } else {                                                           \
      UNALIGNED_NEW ( new_ESP, 32 );                                  \
   }                                                                  \
}                                                                     \
                                                                      \
void __attribute__((regparm(1))) MC_(die_mem_stack_32)(Addr new_ESP) \
{                                                                     \
   PROF_EVENT(124);                                                   \
   if (IS_ALIGNED8_ADDR(new_ESP)) {                                   \
      ALIGNED8_DIE  ( new_ESP-32 );                                   \
      ALIGNED8_DIE  ( new_ESP-24 );                                   \
      ALIGNED8_DIE  ( new_ESP-16 );                                   \
      ALIGNED8_DIE  ( new_ESP-8  );                                   \
   } else if (IS_ALIGNED4_ADDR(new_ESP)) {                            \
      ALIGNED4_DIE  ( new_ESP-32 );                                   \
      ALIGNED8_DIE  ( new_ESP-28 );                                   \
      ALIGNED8_DIE  ( new_ESP-20 );                                   \
      ALIGNED8_DIE  ( new_ESP-12 );                                   \
      ALIGNED4_DIE  ( new_ESP-4  );                                   \
   } else {                                                           \
      UNALIGNED_DIE ( new_ESP-32, 32 );                               \
   }                                                                  \
}                                                                     \
                                                                      \
void MC_(new_mem_stack) ( Addr a, UInt len )                          \
{                                                                     \
   PROF_EVENT(115);                                                   \
   UNALIGNED_NEW ( a, len );                                          \
}                                                                     \
                                                                      \
void MC_(die_mem_stack) ( Addr a, UInt len )                          \
{                                                                     \
   PROF_EVENT(125);                                                   \
   UNALIGNED_DIE ( a, len );                                          \
}
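
/* Illustrative instantiation (a sketch only -- the helper names here
   are hypothetical; each tool supplies its own make-writable /
   make-noaccess primitives in its main module):

      ESP_UPDATE_HANDLERS( make_aligned_word_writable,
                           make_aligned_word_noaccess,
                           make_aligned_doubleword_writable,
                           make_aligned_doubleword_noaccess,
                           make_writable,
                           make_noaccess );

   This expands to the ten specialised MC_(new/die_mem_stack_N)
   handlers plus the two generic ones, as declared above. */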

#endif   /* __MC_COMMON_H */

/*--------------------------------------------------------------------*/
/*--- end                                                mc_common.h ---*/
/*--------------------------------------------------------------------*/