/* -*- c -*-
   ----------------------------------------------------------------

   Notice that the following BSD-style license applies to this one
   file (valgrind.h) only.  The entire rest of Valgrind is licensed
   under the terms of the GNU General Public License, version 2.  See
   the COPYING file in the source distribution for details.

   ----------------------------------------------------------------

   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2005 Julian Seward.  All rights reserved.

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   1. Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.

   2. The origin of this software must not be misrepresented; you must
      not claim that you wrote the original software.  If you use this
      software in a product, an acknowledgment in the product
      documentation would be appreciated but is not required.

   3. Altered source versions must be plainly marked as such, and must
      not be misrepresented as being the original software.

   4. The name of the author may not be used to endorse or promote
      products derived from this software without specific prior written
      permission.

   THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
   OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
   GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
   WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   ----------------------------------------------------------------

   Notice that the above BSD-style license applies to this one file
   (valgrind.h) only.  The entire rest of Valgrind is licensed under
   the terms of the GNU General Public License, version 2.  See the
   COPYING file in the source distribution for details.

   ----------------------------------------------------------------
*/

/* This file is for inclusion into client (your!) code.

   You can use these macros to manipulate and query Valgrind's
   execution inside your own programs.

   The resulting executables will still run without Valgrind, just a
   little bit more slowly than they otherwise would, but otherwise
   unchanged.  When not running on valgrind, each client request
   consumes very few (eg. 7) instructions, so the resulting performance
   loss is negligible unless you plan to execute client requests
   millions of times per second.  Nevertheless, if that is still a
   problem, you can compile with the NVALGRIND symbol defined (gcc
   -DNVALGRIND) so that client requests are not even compiled in. */

#ifndef __VALGRIND_H
#define __VALGRIND_H

#include <stdarg.h>

/* Nb: this file might be included in a file compiled with -ansi.  So
   we can't use C++ style "//" comments nor the "asm" keyword (instead
   use "__asm__"). */

/* Derive some tags indicating what the target architecture is.  Note
   that in this file we're using the compiler's CPP symbols for
   identifying architectures, which are different to the ones we use
   within the rest of Valgrind.  Note, __powerpc__ is active for both
   32 and 64-bit PPC, whereas __powerpc64__ is only active for the
   latter. */
#undef ARCH_x86
#undef ARCH_amd64
#undef ARCH_ppc32
#undef ARCH_ppc64

#if defined(__i386__)
#  define ARCH_x86 1
#elif defined(__x86_64__)
#  define ARCH_amd64 1
#elif defined(__powerpc__) && !defined(__powerpc64__)
#  define ARCH_ppc32 1
#elif defined(__powerpc__) && defined(__powerpc64__)
#  define ARCH_ppc64 1
#endif

/* If we're not compiling for our target architecture, don't generate
   any inline asms. */
#if !defined(ARCH_x86) && !defined(ARCH_amd64) \
    && !defined(ARCH_ppc32) && !defined(ARCH_ppc64)
#  if !defined(NVALGRIND)
#    define NVALGRIND 1
#  endif
#endif


/* ------------------------------------------------------------------ */
/* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS.  There is nothing  */
/* in here of use to end-users -- skip to the next section.            */
/* ------------------------------------------------------------------ */

#if defined(NVALGRIND)

/* Define NVALGRIND to completely remove the Valgrind magic sequence
   from the compiled code (analogous to NDEBUG's effects on
   assert()) */
#define VALGRIND_DO_CLIENT_REQUEST(                               \
        _zzq_rlval, _zzq_default, _zzq_request,                   \
        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4)               \
   {                                                              \
      (_zzq_rlval) = (_zzq_default);                              \
   }

#else  /* ! NVALGRIND */

/* The following defines the magic code sequences which the JITter
   spots and handles magically.  Don't look too closely at them as
   they will rot your brain.

   The assembly code sequences for all architectures are in this one
   file.  This is because this file must be stand-alone, and we don't
   want to have multiple files.

   For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default
   value gets put in the return slot, so that everything works when
   this is executed not under Valgrind.  Args are passed in a memory
   block, and so there's no intrinsic limit to the number that could
   be passed, but it's currently four.

   The macro args are:
      _zzq_rlval    result lvalue
      _zzq_default  default value (result returned when running on real CPU)
      _zzq_request  request code
      _zzq_arg1..4  request params

   The other two macros are used to support function wrapping, and are
   a lot simpler.  VALGRIND_GET_NRADDR returns the value of the
   guest's NRADDR pseudo-register.  VALGRIND_CALL_NOREDIR_* behaves
   the same as the following on the guest, but guarantees that the
   branch instruction will not be redirected: x86: call *%eax, amd64:
   call *%rax, ppc32/ppc64: bctrl.  VALGRIND_CALL_NOREDIR_* is just
   text, not a complete inline asm, since it needs to be combined with
   more magic inline asm stuff to be useful.
*/

/* ---------------------------- x86 ---------------------------- */

#if defined(ARCH_x86)
#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
                     "roll $3, %%edi ; roll $13, %%edi\n\t"       \
                     "roll $29, %%edi ; roll $19, %%edi\n\t"      \

#define VALGRIND_DO_CLIENT_REQUEST(                               \
        _zzq_rlval, _zzq_default, _zzq_request,                   \
        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4)               \
  { volatile unsigned int _zzq_args[5];                           \
    volatile unsigned int _zzq_result;                            \
    _zzq_args[0] = (unsigned int)(_zzq_request);                  \
    _zzq_args[1] = (unsigned int)(_zzq_arg1);                     \
    _zzq_args[2] = (unsigned int)(_zzq_arg2);                     \
    _zzq_args[3] = (unsigned int)(_zzq_arg3);                     \
    _zzq_args[4] = (unsigned int)(_zzq_arg4);                     \
    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* %EDX = client_request ( %EAX ) */         \
                     "xchgl %%ebx,%%ebx"                          \
                     : "=d" (_zzq_result)                         \
                     : "a" (&_zzq_args[0]), "0" (_zzq_default)    \
                     : "cc", "memory"                             \
                    );                                            \
    _zzq_rlval = _zzq_result;                                     \
  }

#define VALGRIND_GET_NRADDR(_zzq_rlval)                           \
  { volatile unsigned int __addr;                                 \
    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* %EAX = guest_NRADDR */                    \
                     "xchgl %%ecx,%%ecx"                          \
                     : "=a" (__addr)                              \
                     :                                            \
                     : "cc", "memory"                             \
                    );                                            \
    _zzq_rlval = (void*)__addr;                                   \
  }

#define VALGRIND_CALL_NOREDIR_EAX                                 \
                     __SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* call-noredir *%EAX */                     \
                     "xchgl %%edx,%%edx\n\t"
#endif /* ARCH_x86 */

/* --------------------------- amd64 --------------------------- */

#if defined(ARCH_amd64)
#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
                     "rolq $3, %%rdi ; rolq $13, %%rdi\n\t"       \
                     "rolq $61, %%rdi ; rolq $51, %%rdi\n\t"      \

#define VALGRIND_DO_CLIENT_REQUEST(                               \
        _zzq_rlval, _zzq_default, _zzq_request,                   \
        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4)               \
  { volatile unsigned long long int _zzq_args[5];                 \
    volatile unsigned long long int _zzq_result;                  \
    _zzq_args[0] = (unsigned long long int)(_zzq_request);        \
    _zzq_args[1] = (unsigned long long int)(_zzq_arg1);           \
    _zzq_args[2] = (unsigned long long int)(_zzq_arg2);           \
    _zzq_args[3] = (unsigned long long int)(_zzq_arg3);           \
    _zzq_args[4] = (unsigned long long int)(_zzq_arg4);           \
    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* %RDX = client_request ( %RAX ) */         \
                     "xchgq %%rbx,%%rbx"                          \
                     : "=d" (_zzq_result)                         \
                     : "a" (&_zzq_args[0]), "0" (_zzq_default)    \
                     : "cc", "memory"                             \
                    );                                            \
    _zzq_rlval = _zzq_result;                                     \
  }

#define VALGRIND_GET_NRADDR(_zzq_rlval)                           \
  { volatile unsigned long long int __addr;                       \
    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* %RAX = guest_NRADDR */                    \
                     "xchgq %%rcx,%%rcx"                          \
                     : "=a" (__addr)                              \
                     :                                            \
                     : "cc", "memory"                             \
                    );                                            \
    _zzq_rlval = (void*)__addr;                                   \
  }

#define VALGRIND_CALL_NOREDIR_RAX                                 \
                     __SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* call-noredir *%RAX */                     \
                     "xchgq %%rdx,%%rdx\n\t"
#endif /* ARCH_amd64 */

/* --------------------------- ppc32 --------------------------- */

#if defined(ARCH_ppc32)
#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
                     "rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t"   \
                     "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"  \

#define VALGRIND_DO_CLIENT_REQUEST(                               \
        _zzq_rlval, _zzq_default, _zzq_request,                   \
        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4)               \
                                                                  \
  { volatile unsigned int _zzq_args[5];                           \
    register unsigned int _zzq_result __asm__("r3");              \
    register volatile unsigned int *_zzq_ptr __asm__("r4");       \
    _zzq_args[0] = (unsigned int)(_zzq_request);                  \
    _zzq_args[1] = (unsigned int)(_zzq_arg1);                     \
    _zzq_args[2] = (unsigned int)(_zzq_arg2);                     \
    _zzq_args[3] = (unsigned int)(_zzq_arg3);                     \
    _zzq_args[4] = (unsigned int)(_zzq_arg4);                     \
    _zzq_ptr = _zzq_args;                                         \
    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* %R3 = client_request ( %R4 ) */           \
                     "or 1,1,1"                                   \
                     : "=r" (_zzq_result)                         \
                     : "0" (_zzq_default), "r" (_zzq_ptr)         \
                     : "cc", "memory");                           \
    _zzq_rlval = _zzq_result;                                     \
  }

#define VALGRIND_GET_NRADDR(_zzq_rlval)                           \
  { register unsigned int __addr __asm__("r3");                   \
    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* %R3 = guest_NRADDR */                     \
                     "or 2,2,2"                                   \
                     : "=r" (__addr)                              \
                     :                                            \
                     : "cc", "memory"                             \
                    );                                            \
    _zzq_rlval = (void*)__addr;                                   \
  }

#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                   \
                     __SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* branch-and-link-to-noredir *%R11 */       \
                     "or 3,3,3\n\t"
#endif /* ARCH_ppc32 */

/* --------------------------- ppc64 --------------------------- */

#if defined(ARCH_ppc64)
#define VALGRIND_DO_CLIENT_REQUEST(                               \
        _zzq_rlval, _zzq_default, _zzq_request,                   \
        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4)               \
                                                                  \
  { volatile unsigned long long int _zzq_args[5];                 \
    register unsigned long long int _zzq_tmp __asm__("r3");       \
    register volatile unsigned long long int *_zzq_ptr __asm__("r4"); \
    _zzq_args[0] = (volatile unsigned long long int)(_zzq_request); \
    _zzq_args[1] = (volatile unsigned long long int)(_zzq_arg1);  \
    _zzq_args[2] = (volatile unsigned long long int)(_zzq_arg2);  \
    _zzq_args[3] = (volatile unsigned long long int)(_zzq_arg3);  \
    _zzq_args[4] = (volatile unsigned long long int)(_zzq_arg4);  \
    _zzq_ptr = _zzq_args;                                         \
    __asm__ volatile("tw 0,3,27\n\t"                              \
                     "rotldi 0,0,61\n\t"                          \
                     "rotldi 0,0,3\n\t"                           \
                     "rotldi 0,0,13\n\t"                          \
                     "rotldi 0,0,51\n\t"                          \
                     "nop\n\t"                                    \
                     : "=r" (_zzq_tmp)                            \
                     : "0" (_zzq_default), "r" (_zzq_ptr)         \
                     : "memory");                                 \
    _zzq_rlval = (__typeof__(_zzq_rlval)) _zzq_tmp;               \
  }
#endif /* ARCH_ppc64 */

/* Insert assembly code for other architectures here... */

#endif /* NVALGRIND */


/* ------------------------------------------------------------------ */
/* ARCHITECTURE SPECIFICS for FUNCTION WRAPPING.  This is all very     */
/* ugly.  It's the least-worst tradeoff I can think of.                */
/* ------------------------------------------------------------------ */

337/* This section defines magic (a.k.a appalling-hack) macros for doing
338 guaranteed-no-redirection macros, so as to get from function
339 wrappers to the functions they are wrapping. The whole point is to
340 construct standard call sequences, but to do the call itself with a
341 special no-redirect call pseudo-instruction that the JIT
342 understands and handles specially. This section is long and
343 repetitious, and I can't see a way to make it shorter.

   The naming scheme is as follows:

      CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc}

   'W' stands for "word" and 'v' for "void".  Hence there are
   different macros for calling arity 0, 1, 2, 3, 4, etc, functions,
   and for each, the possibility of returning a word-typed result, or
   no result.
*/

/* Use these to write the name of your wrapper.  NOTE: duplicates
   VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. */

#define I_WRAP_SONAME_FNNAME_ZU(soname,fnname)                    \
   _vgwZU_##soname##_##fnname

#define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname)                    \
   _vgwZZ_##soname##_##fnname

/* Use this macro from within a wrapper function to get the address of
   the original function.  Once you have that you can then use it in
   one of the CALL_FN_ macros. */
#define VALGRIND_GET_ORIG_FN(_lval)  VALGRIND_GET_NRADDR(_lval)

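/* Illustrative sketch only (the library and function named here are
   hypothetical): a wrapper for "int foo(int)" exported by a library
   whose soname is "libfoo.so.1".  The soname is Z-encoded in the
   wrapper's name (Zd stands for '.'); the address of the real foo is
   fetched with VALGRIND_GET_ORIG_FN and called via CALL_FN_W_W so
   that the call is not itself redirected back to the wrapper:

      int I_WRAP_SONAME_FNNAME_ZU(libfooZdsoZd1,foo) ( int x )
      {
         int   result;
         void* fn;
         VALGRIND_GET_ORIG_FN(fn);
         CALL_FN_W_W(result, fn, x);
         return result;
      }
*/
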
/* Derivatives of the main macros below, for calling functions
   returning void. */

#define CALL_FN_v_v(fnptr)                                        \
   do { volatile unsigned long _junk;                             \
        CALL_FN_W_v(_junk,fnptr); } while (0)

#define CALL_FN_v_W(fnptr, arg1)                                  \
   do { volatile unsigned long _junk;                             \
        CALL_FN_W_W(_junk,fnptr,arg1); } while (0)

#define CALL_FN_v_WW(fnptr, arg1,arg2)                            \
   do { volatile unsigned long _junk;                             \
        CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0)

/* ---------------------------- x86 ---------------------------- */

#if defined(ARCH_x86)

/* These regs are trashed by the hidden call.  There is no need to
   mention eax, as gcc can already see that; mentioning it also causes
   gcc to bomb. */
#define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx"

/* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned
   long) == 4. */

#define CALL_FN_W_v(lval, fnptr)                                  \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[1];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      __asm__ volatile(                                           \
         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
         VALGRIND_CALL_NOREDIR_EAX                                \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_W(lval, fnptr, arg1)                            \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[2];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      _argvec[1] = (unsigned long)(arg1);                         \
      __asm__ volatile(                                           \
         "pushl 4(%%eax)\n\t"                                     \
         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
         VALGRIND_CALL_NOREDIR_EAX                                \
         "addl $4, %%esp\n"                                       \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_WW(lval, fnptr, arg1,arg2)                      \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[3];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      __asm__ volatile(                                           \
         "pushl 8(%%eax)\n\t"                                     \
         "pushl 4(%%eax)\n\t"                                     \
         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
         VALGRIND_CALL_NOREDIR_EAX                                \
         "addl $8, %%esp\n"                                       \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_WWWW(lval, fnptr, arg1,arg2,arg3,arg4)          \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[5];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      __asm__ volatile(                                           \
         "pushl 16(%%eax)\n\t"                                    \
         "pushl 12(%%eax)\n\t"                                    \
         "pushl 8(%%eax)\n\t"                                     \
         "pushl 4(%%eax)\n\t"                                     \
         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
         VALGRIND_CALL_NOREDIR_EAX                                \
         "addl $16, %%esp\n"                                      \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_5W(lval, fnptr, arg1,arg2,arg3,arg4,arg5)       \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[6];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      __asm__ volatile(                                           \
         "pushl 20(%%eax)\n\t"                                    \
         "pushl 16(%%eax)\n\t"                                    \
         "pushl 12(%%eax)\n\t"                                    \
         "pushl 8(%%eax)\n\t"                                     \
         "pushl 4(%%eax)\n\t"                                     \
         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
         VALGRIND_CALL_NOREDIR_EAX                                \
         "addl $20, %%esp\n"                                      \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_6W(lval, fnptr, arg1,arg2,arg3,arg4,arg5,arg6)  \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[7];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      _argvec[6] = (unsigned long)(arg6);                         \
      __asm__ volatile(                                           \
         "pushl 24(%%eax)\n\t"                                    \
         "pushl 20(%%eax)\n\t"                                    \
         "pushl 16(%%eax)\n\t"                                    \
         "pushl 12(%%eax)\n\t"                                    \
         "pushl 8(%%eax)\n\t"                                     \
         "pushl 4(%%eax)\n\t"                                     \
         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
         VALGRIND_CALL_NOREDIR_EAX                                \
         "addl $24, %%esp\n"                                      \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_7W(lval, fnptr, arg1,arg2,arg3,arg4,arg5,arg6,  \
                     arg7)                                        \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[8];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      _argvec[6] = (unsigned long)(arg6);                         \
      _argvec[7] = (unsigned long)(arg7);                         \
      __asm__ volatile(                                           \
         "pushl 28(%%eax)\n\t"                                    \
         "pushl 24(%%eax)\n\t"                                    \
         "pushl 20(%%eax)\n\t"                                    \
         "pushl 16(%%eax)\n\t"                                    \
         "pushl 12(%%eax)\n\t"                                    \
         "pushl 8(%%eax)\n\t"                                     \
         "pushl 4(%%eax)\n\t"                                     \
         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
         VALGRIND_CALL_NOREDIR_EAX                                \
         "addl $28, %%esp\n"                                      \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_8W(lval, fnptr, arg1,arg2,arg3,arg4,arg5,arg6,  \
                     arg7,arg8)                                   \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[9];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      _argvec[6] = (unsigned long)(arg6);                         \
      _argvec[7] = (unsigned long)(arg7);                         \
      _argvec[8] = (unsigned long)(arg8);                         \
      __asm__ volatile(                                           \
         "pushl 32(%%eax)\n\t"                                    \
         "pushl 28(%%eax)\n\t"                                    \
         "pushl 24(%%eax)\n\t"                                    \
         "pushl 20(%%eax)\n\t"                                    \
         "pushl 16(%%eax)\n\t"                                    \
         "pushl 12(%%eax)\n\t"                                    \
         "pushl 8(%%eax)\n\t"                                     \
         "pushl 4(%%eax)\n\t"                                     \
         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
         VALGRIND_CALL_NOREDIR_EAX                                \
         "addl $32, %%esp\n"                                      \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_12W(lval, fnptr, arg1,arg2,arg3,arg4,arg5,      \
                      arg6,arg7,arg8,arg9,arg10,                  \
                      arg11,arg12)                                \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[13];                         \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      _argvec[6] = (unsigned long)(arg6);                         \
      _argvec[7] = (unsigned long)(arg7);                         \
      _argvec[8] = (unsigned long)(arg8);                         \
      _argvec[9] = (unsigned long)(arg9);                         \
      _argvec[10] = (unsigned long)(arg10);                       \
      _argvec[11] = (unsigned long)(arg11);                       \
      _argvec[12] = (unsigned long)(arg12);                       \
      __asm__ volatile(                                           \
         "pushl 48(%%eax)\n\t"                                    \
         "pushl 44(%%eax)\n\t"                                    \
         "pushl 40(%%eax)\n\t"                                    \
         "pushl 36(%%eax)\n\t"                                    \
         "pushl 32(%%eax)\n\t"                                    \
         "pushl 28(%%eax)\n\t"                                    \
         "pushl 24(%%eax)\n\t"                                    \
         "pushl 20(%%eax)\n\t"                                    \
         "pushl 16(%%eax)\n\t"                                    \
         "pushl 12(%%eax)\n\t"                                    \
         "pushl 8(%%eax)\n\t"                                     \
         "pushl 4(%%eax)\n\t"                                     \
         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
         VALGRIND_CALL_NOREDIR_EAX                                \
         "addl $48, %%esp\n"                                      \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#endif /* ARCH_x86 */

/* --------------------------- amd64 --------------------------- */

#if defined(ARCH_amd64)

/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */

/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi",       \
                            "rdi", "r8", "r9", "r10", "r11"

/* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned
   long) == 8. */

#define CALL_FN_W_v(lval, fnptr)                                  \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[1];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      __asm__ volatile(                                           \
         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
         VALGRIND_CALL_NOREDIR_RAX                                \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_W(lval, fnptr, arg1)                            \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[2];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      _argvec[1] = (unsigned long)(arg1);                         \
      __asm__ volatile(                                           \
         "movq 8(%%rax), %%rdi\n\t"                               \
         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
         VALGRIND_CALL_NOREDIR_RAX                                \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_WW(lval, fnptr, arg1,arg2)                      \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[3];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      __asm__ volatile(                                           \
         "movq 16(%%rax), %%rsi\n\t"                              \
         "movq 8(%%rax), %%rdi\n\t"                               \
         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
         VALGRIND_CALL_NOREDIR_RAX                                \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#endif /* ARCH_amd64 */

/* --------------------------- ppc32 --------------------------- */

#if defined(ARCH_ppc32)

/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */

/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS "lr",                                 \
                            "r0", "r2", "r3", "r4", "r5", "r6",   \
                            "r7", "r8", "r9", "r10", "r11", "r12"

/* These CALL_FN_ macros assume that on ppc32-linux, sizeof(unsigned
   long) == 4. */

#define CALL_FN_W_v(lval, fnptr)                                  \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[1];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      __asm__ volatile(                                           \
         "mr 11,%1\n\t"                                           \
         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
         "mr %0,3"                                                \
         : /*out*/   "=r" (_res)                                  \
         : /*in*/    "r" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_W(lval, fnptr, arg1)                            \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[2];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      _argvec[1] = (unsigned long)arg1;                           \
      __asm__ volatile(                                           \
         "mr 11,%1\n\t"                                           \
         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
         "mr %0,3"                                                \
         : /*out*/   "=r" (_res)                                  \
         : /*in*/    "r" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_WW(lval, fnptr, arg1,arg2)                      \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[3];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      _argvec[1] = (unsigned long)arg1;                           \
      _argvec[2] = (unsigned long)arg2;                           \
      __asm__ volatile(                                           \
         "mr 11,%1\n\t"                                           \
         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
         "lwz 4,8(11)\n\t"                                        \
         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
         "mr %0,3"                                                \
         : /*out*/   "=r" (_res)                                  \
         : /*in*/    "r" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#endif /* ARCH_ppc32 */

/* --------------------------- ppc64 --------------------------- */


/* ------------------------------------------------------------------ */
/* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS.                */
/*                                                                     */
/* ------------------------------------------------------------------ */

/* Some request codes.  There are many more of these, but most are not
   exposed to end-user view.  These are the public ones, all of the
   form 0x1000 + small_number.

   Core ones are in the range 0x00000000--0x0000ffff.  The non-public
   ones start at 0x2000.
*/

/* These macros are used by tools -- they must be public, but don't
   embed them into other programs. */
#define VG_USERREQ_TOOL_BASE(a,b) \
   ((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16))
#define VG_IS_TOOL_USERREQ(a, b, v) \
   (VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000))

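/* Illustrative sketch only: a tool tagging its client requests with
   the (hypothetical) code letters 'X','Y' could build its own request
   codes on top of VG_USERREQ_TOOL_BASE, and recognise them with
   VG_IS_TOOL_USERREQ:

      #define VG_USERREQ__MY_REQUEST  (VG_USERREQ_TOOL_BASE('X','Y') + 1)

      ... VG_IS_TOOL_USERREQ('X','Y', request_code) ...
*/
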
typedef
   enum { VG_USERREQ__RUNNING_ON_VALGRIND  = 0x1001,
          VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002,

          /* These allow any function to be called from the simulated
             CPU but run on the real CPU.  Nb: the first arg passed to
             the function is always the ThreadId of the running
             thread!  So CLIENT_CALL0 actually requires a 1 arg
             function, etc. */
          VG_USERREQ__CLIENT_CALL0 = 0x1101,
          VG_USERREQ__CLIENT_CALL1 = 0x1102,
          VG_USERREQ__CLIENT_CALL2 = 0x1103,
          VG_USERREQ__CLIENT_CALL3 = 0x1104,

          /* Can be useful in regression testing suites -- eg. can
             send Valgrind's output to /dev/null and still count
             errors. */
          VG_USERREQ__COUNT_ERRORS = 0x1201,

          /* These are useful and can be interpreted by any tool that
             tracks malloc() et al, by using vg_replace_malloc.c. */
          VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
          VG_USERREQ__FREELIKE_BLOCK   = 0x1302,
          /* Memory pool support. */
          VG_USERREQ__CREATE_MEMPOOL   = 0x1303,
          VG_USERREQ__DESTROY_MEMPOOL  = 0x1304,
          VG_USERREQ__MEMPOOL_ALLOC    = 0x1305,
          VG_USERREQ__MEMPOOL_FREE     = 0x1306,

          /* Allow printfs to valgrind log. */
          VG_USERREQ__PRINTF           = 0x1401,
          VG_USERREQ__PRINTF_BACKTRACE = 0x1402,

          /* Stack support. */
          VG_USERREQ__STACK_REGISTER   = 0x1501,
          VG_USERREQ__STACK_DEREGISTER = 0x1502,
          VG_USERREQ__STACK_CHANGE     = 0x1503,
   } Vg_ClientRequest;

#if !defined(__GNUC__)
#  define __extension__ /* */
#endif

/* Returns the number of Valgrinds this code is running under.  That
   is, 0 if running natively, 1 if running under Valgrind, 2 if
   running under Valgrind which is running under another Valgrind,
   etc. */
#define RUNNING_ON_VALGRIND  __extension__                        \
   ({unsigned int _qzz_res;                                       \
    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* if not */,          \
                               VG_USERREQ__RUNNING_ON_VALGRIND,   \
                               0, 0, 0, 0);                       \
    _qzz_res;                                                     \
   })

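/* Usage sketch only (the helper named here is hypothetical):

      if (RUNNING_ON_VALGRIND) {
         use_smaller_test_workload();
      }
*/
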

/* Discard translation of code in the range [_qzz_addr .. _qzz_addr +
   _qzz_len - 1].  Useful if you are debugging a JITter or some such,
   since it provides a way to make sure valgrind will retranslate the
   invalidated area.  Returns no value. */
#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len)         \
   {unsigned int _qzz_res;                                        \
    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
                               VG_USERREQ__DISCARD_TRANSLATIONS,  \
                               _qzz_addr, _qzz_len, 0, 0);        \
   }

/* These requests are for getting Valgrind itself to print something.
   Possibly with a backtrace.  This is a really ugly hack. */

#if defined(NVALGRIND)

#  define VALGRIND_PRINTF(...)
#  define VALGRIND_PRINTF_BACKTRACE(...)

#else /* NVALGRIND */

int VALGRIND_PRINTF(const char *format, ...)
   __attribute__((format(__printf__, 1, 2)));
__attribute__((weak))
int
VALGRIND_PRINTF(const char *format, ...)
{
   unsigned long _qzz_res;
   va_list vargs;
   va_start(vargs, format);
   VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, VG_USERREQ__PRINTF,
                              (unsigned long)format, (unsigned long)vargs,
                              0, 0);
   va_end(vargs);
   return (int)_qzz_res;
}

int VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
   __attribute__((format(__printf__, 1, 2)));
__attribute__((weak))
int
VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
{
   unsigned long _qzz_res;
   va_list vargs;
   va_start(vargs, format);
   VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, VG_USERREQ__PRINTF_BACKTRACE,
                              (unsigned long)format, (unsigned long)vargs,
                              0, 0);
   va_end(vargs);
   return (int)_qzz_res;
}

#endif /* NVALGRIND */


/* These requests allow control to move from the simulated CPU to the
   real CPU, calling an arbitrary function. */
#define VALGRIND_NON_SIMD_CALL0(_qyy_fn)                          \
   ({unsigned long _qyy_res;                                      \
    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
                               VG_USERREQ__CLIENT_CALL0,          \
                               _qyy_fn,                           \
                               0, 0, 0);                          \
    _qyy_res;                                                     \
   })

#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1)               \
   ({unsigned long _qyy_res;                                      \
    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
                               VG_USERREQ__CLIENT_CALL1,          \
                               _qyy_fn,                           \
                               _qyy_arg1, 0, 0);                  \
    _qyy_res;                                                     \
   })

#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2)    \
   ({unsigned long _qyy_res;                                      \
    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
                               VG_USERREQ__CLIENT_CALL2,          \
                               _qyy_fn,                           \
                               _qyy_arg1, _qyy_arg2, 0);          \
    _qyy_res;                                                     \
   })

#define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \
   ({unsigned long _qyy_res;                                      \
    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
                               VG_USERREQ__CLIENT_CALL3,          \
                               _qyy_fn,                           \
                               _qyy_arg1, _qyy_arg2, _qyy_arg3);  \
    _qyy_res;                                                     \
   })

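/* Usage sketch only (my_helper and do_real_cpu_work are hypothetical).
   Note that the first argument passed to the called function is always
   the ThreadId of the running thread, so a "0-arg" call needs a 1-arg
   function:

      static long my_helper ( int tid ) { return do_real_cpu_work(); }
      ...
      unsigned long r = VALGRIND_NON_SIMD_CALL0(my_helper);
*/
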

/* Counts the number of errors that have been recorded by a tool.  Nb:
   the tool must record the errors with VG_(maybe_record_error)() or
   VG_(unique_error)() for them to be counted. */
#define VALGRIND_COUNT_ERRORS                                     \
   ({unsigned int _qyy_res;                                       \
    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
                               VG_USERREQ__COUNT_ERRORS,          \
                               0, 0, 0, 0);                       \
    _qyy_res;                                                     \
   })

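/* Usage sketch only, e.g. in a regression-test harness (the helper
   names are hypothetical):

      run_test_case();
      if (VALGRIND_COUNT_ERRORS > 0)
         mark_test_as_failed();
*/
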
/* Mark a block of memory as having been allocated by a malloc()-like
   function.  `addr' is the start of the usable block (ie. after any
   redzone).  `rzB' is the redzone size if the allocator can apply
   redzones; use '0' if not.  Adding redzones makes it more likely
   Valgrind will spot block overruns.  `is_zeroed' indicates if the
   memory is zeroed, as it is for calloc().  Put it immediately after
   the point where a block is allocated.

   If you're allocating memory via superblocks, and then handing out
   small chunks of each superblock, if you don't have redzones on your
   small blocks, it's worth marking the superblock with
   VALGRIND_MAKE_NOACCESS when it's created, so that block overruns are
   detected.  But if you can put redzones on, it's probably better to
   not do this, so that messages for small overruns are described in
   terms of the small block rather than the superblock (but if you have
   a big overrun that skips over a redzone, you could miss an error
   this way).  See memcheck/tests/custom_alloc.c for an example.

   Nb: block must be freed via a free()-like function specified
   with VALGRIND_FREELIKE_BLOCK or mismatch errors will occur. */
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)    \
   {unsigned int _qzz_res;                                        \
    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
                               VG_USERREQ__MALLOCLIKE_BLOCK,      \
                               addr, sizeB, rzB, is_zeroed);      \
   }

/* Mark a block of memory as having been freed by a free()-like function.
   `rzB' is redzone size; it must match that given to
   VALGRIND_MALLOCLIKE_BLOCK.  Memory not freed will be detected by the leak
   checker.  Put it immediately after the point where the block is freed. */
#define VALGRIND_FREELIKE_BLOCK(addr, rzB)                        \
   {unsigned int _qzz_res;                                        \
    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
                               VG_USERREQ__FREELIKE_BLOCK,        \
                               addr, rzB, 0, 0);                  \
   }

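/* Illustrative sketch only (the superblock helpers are hypothetical):
   a custom allocator handing out chunks from its own superblocks can
   tell malloc()-tracking tools about each chunk:

      void* my_alloc ( size_t n )
      {
         void* p = get_bytes_from_superblock(n);
         VALGRIND_MALLOCLIKE_BLOCK(p, n, 0, 0);
         return p;
      }

      void my_free ( void* p )
      {
         return_bytes_to_superblock(p);
         VALGRIND_FREELIKE_BLOCK(p, 0);
      }
*/
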
/* Create a memory pool. */
#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed)             \
   {unsigned int _qzz_res;                                        \
    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
                               VG_USERREQ__CREATE_MEMPOOL,        \
                               pool, rzB, is_zeroed, 0);          \
   }

/* Destroy a memory pool. */
#define VALGRIND_DESTROY_MEMPOOL(pool)                            \
   {unsigned int _qzz_res;                                        \
    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
                               VG_USERREQ__DESTROY_MEMPOOL,       \
                               pool, 0, 0, 0);                    \
   }

/* Associate a piece of memory with a memory pool. */
#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size)                  \
   {unsigned int _qzz_res;                                        \
    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
                               VG_USERREQ__MEMPOOL_ALLOC,         \
                               pool, addr, size, 0);              \
   }

/* Disassociate a piece of memory from a memory pool. */
#define VALGRIND_MEMPOOL_FREE(pool, addr)                         \
   {unsigned int _qzz_res;                                        \
    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
                               VG_USERREQ__MEMPOOL_FREE,          \
                               pool, addr, 0, 0);                 \
   }

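/* Usage sketch only (my_pool, pool_create, pool_get, pool_release are
   hypothetical): each chunk handed out of a pool is reported with
   MEMPOOL_ALLOC, and disassociated again with MEMPOOL_FREE:

      my_pool = pool_create();
      VALGRIND_CREATE_MEMPOOL(my_pool, 0, 0);

      void* p = pool_get(my_pool, 100);
      VALGRIND_MEMPOOL_ALLOC(my_pool, p, 100);
      ...
      VALGRIND_MEMPOOL_FREE(my_pool, p);
      pool_release(my_pool, p);

      VALGRIND_DESTROY_MEMPOOL(my_pool);
*/
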
/* Mark a piece of memory as being a stack.  Returns a stack id. */
#define VALGRIND_STACK_REGISTER(start, end)                       \
   ({unsigned int _qzz_res;                                       \
    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
                               VG_USERREQ__STACK_REGISTER,        \
                               start, end, 0, 0);                 \
    _qzz_res;                                                     \
   })

/* Unmark the piece of memory associated with a stack id as being a
   stack. */
#define VALGRIND_STACK_DEREGISTER(id)                             \
   {unsigned int _qzz_res;                                        \
    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
                               VG_USERREQ__STACK_DEREGISTER,      \
                               id, 0, 0, 0);                      \
   }

/* Change the start and end address of the stack id. */
#define VALGRIND_STACK_CHANGE(id, start, end)                     \
   {unsigned int _qzz_res;                                        \
    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
                               VG_USERREQ__STACK_CHANGE,          \
                               id, start, end, 0);                \
   }

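/* Usage sketch only (STACK_SIZE is hypothetical): registering memory
   that will be used as a stack, e.g. for a user-level thread or
   coroutine, and deregistering it again before it is freed:

      char* stack = malloc(STACK_SIZE);
      unsigned int sid = VALGRIND_STACK_REGISTER(stack, stack + STACK_SIZE);
      ...
      VALGRIND_STACK_DEREGISTER(sid);
      free(stack);
*/
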

#undef ARCH_x86
#undef ARCH_amd64
#undef ARCH_ppc32
#undef ARCH_ppc64

#endif /* __VALGRIND_H */