/* -*- c -*-
   ----------------------------------------------------------------

   Notice that the following BSD-style license applies to this one
   file (valgrind.h) only.  The entire rest of Valgrind is licensed
   under the terms of the GNU General Public License, version 2.  See
   the COPYING file in the source distribution for details.

   ----------------------------------------------------------------

   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2005 Julian Seward.  All rights reserved.

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   1. Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.

   2. The origin of this software must not be misrepresented; you must
      not claim that you wrote the original software.  If you use this
      software in a product, an acknowledgment in the product
      documentation would be appreciated but is not required.

   3. Altered source versions must be plainly marked as such, and must
      not be misrepresented as being the original software.

   4. The name of the author may not be used to endorse or promote
      products derived from this software without specific prior written
      permission.

   THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
   OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
   GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
   WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   ----------------------------------------------------------------

   Notice that the above BSD-style license applies to this one file
   (valgrind.h) only.  The entire rest of Valgrind is licensed under
   the terms of the GNU General Public License, version 2.  See the
   COPYING file in the source distribution for details.

   ----------------------------------------------------------------
*/


/* This file is for inclusion into client (your!) code.

   You can use these macros to manipulate and query Valgrind's
   execution inside your own programs.

   The resulting executables will still run without Valgrind, just a
   little bit more slowly than they otherwise would, but otherwise
   unchanged.  When not running on valgrind, each client request
   consumes very few (eg. 7) instructions, so the resulting performance
   loss is negligible unless you plan to execute client requests
   millions of times per second.  Nevertheless, if that is still a
   problem, you can compile with the NVALGRIND symbol defined (gcc
   -DNVALGRIND) so that client requests are not even compiled in. */
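
/* For example, client code might do the following (an illustrative
   sketch only, not part of this header):

      #include "valgrind.h"
      ...
      if (RUNNING_ON_VALGRIND) {
         ... enable extra self-checking, since we are being watched ...
      }

   Building the same code with -DNVALGRIND compiles the request away
   entirely, and RUNNING_ON_VALGRIND then simply evaluates to 0. */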

#ifndef __VALGRIND_H
#define __VALGRIND_H

#include <stdarg.h>

/* Nb: this file might be included in a file compiled with -ansi.  So
   we can't use C++ style "//" comments nor the "asm" keyword (instead
   use "__asm__"). */

/* Derive some tags indicating what the target architecture is.  Note
   that in this file we're using the compiler's CPP symbols for
   identifying architectures, which are different to the ones we use
   within the rest of Valgrind.  Note, __powerpc__ is active for both
   32 and 64-bit PPC, whereas __powerpc64__ is only active for the
   latter. */
#undef ARCH_x86
#undef ARCH_amd64
#undef ARCH_ppc32
#undef ARCH_ppc64

#if defined(__i386__)
#  define ARCH_x86 1
#elif defined(__x86_64__)
#  define ARCH_amd64 1
#elif defined(__powerpc__) && !defined(__powerpc64__)
#  define ARCH_ppc32 1
#elif defined(__powerpc__) && defined(__powerpc64__)
#  define ARCH_ppc64 1
#endif

/* If we're not compiling for our target architecture, don't generate
   any inline asms. */
#if !defined(ARCH_x86) && !defined(ARCH_amd64) \
    && !defined(ARCH_ppc32) && !defined(ARCH_ppc64)
#  if !defined(NVALGRIND)
#    define NVALGRIND 1
#  endif
#endif


/* ------------------------------------------------------------------ */
/* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS.  There is nothing */
/* in here of use to end-users -- skip to the next section.           */
/* ------------------------------------------------------------------ */

#if defined(NVALGRIND)

/* Define NVALGRIND to completely remove the Valgrind magic sequence
   from the compiled code (analogous to NDEBUG's effects on
   assert()) */
#define VALGRIND_DO_CLIENT_REQUEST(                               \
        _zzq_rlval, _zzq_default, _zzq_request,                   \
        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4)               \
   {                                                              \
      (_zzq_rlval) = (_zzq_default);                              \
   }

#else  /* ! NVALGRIND */

/* The following defines the magic code sequences which the JITter
   spots and handles magically.  Don't look too closely at them as
   they will rot your brain.

   The assembly code sequences for all architectures are in this one
   file.  This is because this file must be stand-alone, and we don't
   want to have multiple files.

   For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default
   value gets put in the return slot, so that everything works when
   this is executed not under Valgrind.  Args are passed in a memory
   block, and so there's no intrinsic limit to the number that could
   be passed, but it's currently four.

   The macro args are:
      _zzq_rlval    result lvalue
      _zzq_default  default value (result returned when running on real CPU)
      _zzq_request  request code
      _zzq_arg1..4  request params

   The other two macros are used to support function wrapping, and are
   a lot simpler.  VALGRIND_GET_NRADDR returns the value of the
   guest's NRADDR pseudo-register.  VALGRIND_CALL_NOREDIR_* behaves
   the same as the following on the guest, but guarantees that the
   branch instruction will not be redirected: x86: call *%eax, amd64:
   call *%rax, ppc32/ppc64: bctrl.  VALGRIND_CALL_NOREDIR_* is just
   text, not a complete inline asm, since it needs to be combined with
   more magic inline asm stuff to be useful.
*/
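
/* As an illustration, a direct use of the request mechanism looks
   roughly like this (a sketch only; client code would normally use
   the higher-level macros defined later in this file instead):

      unsigned int res;
      VALGRIND_DO_CLIENT_REQUEST(res, 0,
                                 VG_USERREQ__RUNNING_ON_VALGRIND,
                                 0, 0, 0, 0);

   Run natively, res is simply set to the supplied default (0); run
   under Valgrind, the JIT spots the preamble and fills in the real
   answer. */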

/* ---------------------------- x86 ---------------------------- */

#if defined(ARCH_x86)
#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
                     "roll $3, %%edi ; roll $13, %%edi\n\t"       \
                     "roll $29, %%edi ; roll $19, %%edi\n\t"

#define VALGRIND_DO_CLIENT_REQUEST(                               \
        _zzq_rlval, _zzq_default, _zzq_request,                   \
        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4)               \
  { volatile unsigned int _zzq_args[5];                           \
    volatile unsigned int _zzq_result;                            \
    _zzq_args[0] = (unsigned int)(_zzq_request);                  \
    _zzq_args[1] = (unsigned int)(_zzq_arg1);                     \
    _zzq_args[2] = (unsigned int)(_zzq_arg2);                     \
    _zzq_args[3] = (unsigned int)(_zzq_arg3);                     \
    _zzq_args[4] = (unsigned int)(_zzq_arg4);                     \
    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* %EDX = client_request ( %EAX ) */         \
                     "xchgl %%ebx,%%ebx"                          \
                     : "=d" (_zzq_result)                         \
                     : "a" (&_zzq_args[0]), "0" (_zzq_default)    \
                     : "cc", "memory"                             \
                    );                                            \
    _zzq_rlval = _zzq_result;                                     \
  }

#define VALGRIND_GET_NRADDR(_zzq_rlval)                           \
  { volatile unsigned int __addr;                                 \
    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* %EAX = guest_NRADDR */                    \
                     "xchgl %%ecx,%%ecx"                          \
                     : "=a" (__addr)                              \
                     :                                            \
                     : "cc", "memory"                             \
                    );                                            \
    _zzq_rlval = (void*)__addr;                                   \
  }

#define VALGRIND_CALL_NOREDIR_EAX                                 \
                     __SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* call-noredir *%EAX */                     \
                     "xchgl %%edx,%%edx\n\t"
#endif /* ARCH_x86 */

/* --------------------------- amd64 --------------------------- */

#if defined(ARCH_amd64)
#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
                     "rolq $3, %%rdi ; rolq $13, %%rdi\n\t"       \
                     "rolq $61, %%rdi ; rolq $51, %%rdi\n\t"

#define VALGRIND_DO_CLIENT_REQUEST(                               \
        _zzq_rlval, _zzq_default, _zzq_request,                   \
        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4)               \
  { volatile unsigned long long int _zzq_args[5];                 \
    volatile unsigned long long int _zzq_result;                  \
    _zzq_args[0] = (unsigned long long int)(_zzq_request);        \
    _zzq_args[1] = (unsigned long long int)(_zzq_arg1);           \
    _zzq_args[2] = (unsigned long long int)(_zzq_arg2);           \
    _zzq_args[3] = (unsigned long long int)(_zzq_arg3);           \
    _zzq_args[4] = (unsigned long long int)(_zzq_arg4);           \
    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* %RDX = client_request ( %RAX ) */         \
                     "xchgq %%rbx,%%rbx"                          \
                     : "=d" (_zzq_result)                         \
                     : "a" (&_zzq_args[0]), "0" (_zzq_default)    \
                     : "cc", "memory"                             \
                    );                                            \
    _zzq_rlval = _zzq_result;                                     \
  }

#define VALGRIND_GET_NRADDR(_zzq_rlval)                           \
  { volatile unsigned long long int __addr;                       \
    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* %RAX = guest_NRADDR */                    \
                     "xchgq %%rcx,%%rcx"                          \
                     : "=a" (__addr)                              \
                     :                                            \
                     : "cc", "memory"                             \
                    );                                            \
    _zzq_rlval = (void*)__addr;                                   \
  }

#define VALGRIND_CALL_NOREDIR_RAX                                 \
                     __SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* call-noredir *%RAX */                     \
                     "xchgq %%rdx,%%rdx\n\t"
#endif /* ARCH_amd64 */

/* --------------------------- ppc32 --------------------------- */

#if defined(ARCH_ppc32)
#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
                     "rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t"   \
                     "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"

#define VALGRIND_DO_CLIENT_REQUEST(                               \
        _zzq_rlval, _zzq_default, _zzq_request,                   \
        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4)               \
                                                                  \
  { unsigned int _zzq_args[5];                                    \
    register unsigned int  _zzq_result __asm__("r3");             \
    register unsigned int* _zzq_ptr __asm__("r4");                \
    _zzq_args[0] = (unsigned int)(_zzq_request);                  \
    _zzq_args[1] = (unsigned int)(_zzq_arg1);                     \
    _zzq_args[2] = (unsigned int)(_zzq_arg2);                     \
    _zzq_args[3] = (unsigned int)(_zzq_arg3);                     \
    _zzq_args[4] = (unsigned int)(_zzq_arg4);                     \
    _zzq_ptr = _zzq_args;                                         \
    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* %R3 = client_request ( %R4 ) */           \
                     "or 1,1,1"                                   \
                     : "=r" (_zzq_result)                         \
                     : "0" (_zzq_default), "r" (_zzq_ptr)         \
                     : "cc", "memory");                           \
    _zzq_rlval = _zzq_result;                                     \
  }

#define VALGRIND_GET_NRADDR(_zzq_rlval)                           \
  { register unsigned int __addr __asm__("r3");                   \
    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* %R3 = guest_NRADDR */                     \
                     "or 2,2,2"                                   \
                     : "=r" (__addr)                              \
                     :                                            \
                     : "cc", "memory"                             \
                    );                                            \
    _zzq_rlval = (void*)__addr;                                   \
  }

#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                   \
                     __SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* branch-and-link-to-noredir *%R11 */       \
                     "or 3,3,3\n\t"
#endif /* ARCH_ppc32 */

/* --------------------------- ppc64 --------------------------- */

#if defined(ARCH_ppc64)
#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
                     "rotldi 0,0,3 ; rotldi 0,0,13\n\t"           \
                     "rotldi 0,0,61 ; rotldi 0,0,51\n\t"

#define VALGRIND_DO_CLIENT_REQUEST(                               \
        _zzq_rlval, _zzq_default, _zzq_request,                   \
        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4)               \
                                                                  \
  { unsigned long long int _zzq_args[5];                          \
    register unsigned long long int  _zzq_result __asm__("r3");   \
    register unsigned long long int* _zzq_ptr __asm__("r4");      \
    _zzq_args[0] = (unsigned long long int)(_zzq_request);        \
    _zzq_args[1] = (unsigned long long int)(_zzq_arg1);           \
    _zzq_args[2] = (unsigned long long int)(_zzq_arg2);           \
    _zzq_args[3] = (unsigned long long int)(_zzq_arg3);           \
    _zzq_args[4] = (unsigned long long int)(_zzq_arg4);           \
    _zzq_ptr = _zzq_args;                                         \
    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* %R3 = client_request ( %R4 ) */           \
                     "or 1,1,1"                                   \
                     : "=r" (_zzq_result)                         \
                     : "0" (_zzq_default), "r" (_zzq_ptr)         \
                     : "cc", "memory");                           \
    _zzq_rlval = _zzq_result;                                     \
  }

#define VALGRIND_GET_NRADDR(_zzq_rlval)                           \
  { register unsigned long long int __addr __asm__("r3");         \
    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* %R3 = guest_NRADDR */                     \
                     "or 2,2,2"                                   \
                     : "=r" (__addr)                              \
                     :                                            \
                     : "cc", "memory"                             \
                    );                                            \
    _zzq_rlval = (void*)__addr;                                   \
  }

#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                   \
                     __SPECIAL_INSTRUCTION_PREAMBLE               \
                     /* branch-and-link-to-noredir *%R11 */       \
                     "or 3,3,3\n\t"

#endif /* ARCH_ppc64 */

/* Insert assembly code for other architectures here... */

#endif /* NVALGRIND */


/* ------------------------------------------------------------------ */
/* ARCHITECTURE SPECIFICS for FUNCTION WRAPPING.  This is all very    */
/* ugly.  It's the least-worst tradeoff I can think of.               */
/* ------------------------------------------------------------------ */

/* This section defines magic (a.k.a. appalling-hack) macros for making
   guaranteed-no-redirection calls, so as to get from function
   wrappers to the functions they are wrapping.  The whole point is to
   construct standard call sequences, but to do the call itself with a
   special no-redirect call pseudo-instruction that the JIT
   understands and handles specially.  This section is long and
   repetitious, and I can't see a way to make it shorter.

   The naming scheme is as follows:

      CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc}

   'W' stands for "word" and 'v' for "void".  Hence there are
   different macros for calling arity 0, 1, 2, 3, 4, etc, functions,
   and for each, the possibility of returning a word-typed result, or
   no result.
*/

/* Use these to write the name of your wrapper.  NOTE: duplicates
   VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. */

#define I_WRAP_SONAME_FNNAME_ZU(soname,fnname)                    \
   _vgwZU_##soname##_##fnname

#define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname)                    \
   _vgwZZ_##soname##_##fnname

/* Use this macro from within a wrapper function to get the address of
   the original function.  Once you have that you can then use it in
   one of the CALL_FN_ macros. */
#define VALGRIND_GET_ORIG_FN(_lval)  VALGRIND_GET_NRADDR(_lval)
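
/* For example, a wrapper for a two-argument function 'foo' might look
   roughly like this (an illustrative sketch only; it assumes 'foo'
   lives in an object with an empty soname, hence the "NONE" tag, and
   that the name encoding rules of pub_tool_redir.h are respected):

      int I_WRAP_SONAME_FNNAME_ZU(NONE,foo) ( int x, int y )
      {
         int   r;
         void* fn;
         VALGRIND_GET_ORIG_FN(fn);
         CALL_FN_W_WW(r, fn, x, y);
         return r;
      }
*/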

/* Derivatives of the main macros below, for calling functions
   returning void. */

#define CALL_FN_v_v(fnptr)                                        \
   do { volatile unsigned long _junk;                             \
        CALL_FN_W_v(_junk,fnptr); } while (0)

#define CALL_FN_v_W(fnptr, arg1)                                  \
   do { volatile unsigned long _junk;                             \
        CALL_FN_W_W(_junk,fnptr,arg1); } while (0)

#define CALL_FN_v_WW(fnptr, arg1,arg2)                            \
   do { volatile unsigned long _junk;                             \
        CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0)

/* ---------------------------- x86 ---------------------------- */

#if defined(ARCH_x86)

/* These regs are trashed by the hidden call.  No need to mention eax
   as gcc can already see that, and mentioning it causes gcc to bomb. */
#define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx"

/* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned
   long) == 4. */

#define CALL_FN_W_v(lval, fnptr)                                  \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[1];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      __asm__ volatile(                                           \
         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
         VALGRIND_CALL_NOREDIR_EAX                                \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_W(lval, fnptr, arg1)                            \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[2];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      _argvec[1] = (unsigned long)(arg1);                         \
      __asm__ volatile(                                           \
         "pushl 4(%%eax)\n\t"                                     \
         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
         VALGRIND_CALL_NOREDIR_EAX                                \
         "addl $4, %%esp\n"                                       \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_WW(lval, fnptr, arg1,arg2)                      \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[3];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      __asm__ volatile(                                           \
         "pushl 8(%%eax)\n\t"                                     \
         "pushl 4(%%eax)\n\t"                                     \
         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
         VALGRIND_CALL_NOREDIR_EAX                                \
         "addl $8, %%esp\n"                                       \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_WWWW(lval, fnptr, arg1,arg2,arg3,arg4)          \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[5];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      __asm__ volatile(                                           \
         "pushl 16(%%eax)\n\t"                                    \
         "pushl 12(%%eax)\n\t"                                    \
         "pushl 8(%%eax)\n\t"                                     \
         "pushl 4(%%eax)\n\t"                                     \
         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
         VALGRIND_CALL_NOREDIR_EAX                                \
         "addl $16, %%esp\n"                                      \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_5W(lval, fnptr, arg1,arg2,arg3,arg4,arg5)       \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[6];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      __asm__ volatile(                                           \
         "pushl 20(%%eax)\n\t"                                    \
         "pushl 16(%%eax)\n\t"                                    \
         "pushl 12(%%eax)\n\t"                                    \
         "pushl 8(%%eax)\n\t"                                     \
         "pushl 4(%%eax)\n\t"                                     \
         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
         VALGRIND_CALL_NOREDIR_EAX                                \
         "addl $20, %%esp\n"                                      \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_6W(lval, fnptr, arg1,arg2,arg3,arg4,arg5,arg6)  \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[7];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      _argvec[6] = (unsigned long)(arg6);                         \
      __asm__ volatile(                                           \
         "pushl 24(%%eax)\n\t"                                    \
         "pushl 20(%%eax)\n\t"                                    \
         "pushl 16(%%eax)\n\t"                                    \
         "pushl 12(%%eax)\n\t"                                    \
         "pushl 8(%%eax)\n\t"                                     \
         "pushl 4(%%eax)\n\t"                                     \
         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
         VALGRIND_CALL_NOREDIR_EAX                                \
         "addl $24, %%esp\n"                                      \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_7W(lval, fnptr, arg1,arg2,arg3,arg4,arg5,arg6,  \
                     arg7)                                        \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[8];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      _argvec[6] = (unsigned long)(arg6);                         \
      _argvec[7] = (unsigned long)(arg7);                         \
      __asm__ volatile(                                           \
         "pushl 28(%%eax)\n\t"                                    \
         "pushl 24(%%eax)\n\t"                                    \
         "pushl 20(%%eax)\n\t"                                    \
         "pushl 16(%%eax)\n\t"                                    \
         "pushl 12(%%eax)\n\t"                                    \
         "pushl 8(%%eax)\n\t"                                     \
         "pushl 4(%%eax)\n\t"                                     \
         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
         VALGRIND_CALL_NOREDIR_EAX                                \
         "addl $28, %%esp\n"                                      \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_8W(lval, fnptr, arg1,arg2,arg3,arg4,arg5,arg6,  \
                     arg7,arg8)                                   \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[9];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      _argvec[6] = (unsigned long)(arg6);                         \
      _argvec[7] = (unsigned long)(arg7);                         \
      _argvec[8] = (unsigned long)(arg8);                         \
      __asm__ volatile(                                           \
         "pushl 32(%%eax)\n\t"                                    \
         "pushl 28(%%eax)\n\t"                                    \
         "pushl 24(%%eax)\n\t"                                    \
         "pushl 20(%%eax)\n\t"                                    \
         "pushl 16(%%eax)\n\t"                                    \
         "pushl 12(%%eax)\n\t"                                    \
         "pushl 8(%%eax)\n\t"                                     \
         "pushl 4(%%eax)\n\t"                                     \
         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
         VALGRIND_CALL_NOREDIR_EAX                                \
         "addl $32, %%esp\n"                                      \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_12W(lval, fnptr, arg1,arg2,arg3,arg4,arg5,      \
                      arg6,arg7,arg8,arg9,arg10,                  \
                      arg11,arg12)                                \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[13];                         \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      _argvec[3] = (unsigned long)(arg3);                         \
      _argvec[4] = (unsigned long)(arg4);                         \
      _argvec[5] = (unsigned long)(arg5);                         \
      _argvec[6] = (unsigned long)(arg6);                         \
      _argvec[7] = (unsigned long)(arg7);                         \
      _argvec[8] = (unsigned long)(arg8);                         \
      _argvec[9] = (unsigned long)(arg9);                         \
      _argvec[10] = (unsigned long)(arg10);                       \
      _argvec[11] = (unsigned long)(arg11);                       \
      _argvec[12] = (unsigned long)(arg12);                       \
      __asm__ volatile(                                           \
         "pushl 48(%%eax)\n\t"                                    \
         "pushl 44(%%eax)\n\t"                                    \
         "pushl 40(%%eax)\n\t"                                    \
         "pushl 36(%%eax)\n\t"                                    \
         "pushl 32(%%eax)\n\t"                                    \
         "pushl 28(%%eax)\n\t"                                    \
         "pushl 24(%%eax)\n\t"                                    \
         "pushl 20(%%eax)\n\t"                                    \
         "pushl 16(%%eax)\n\t"                                    \
         "pushl 12(%%eax)\n\t"                                    \
         "pushl 8(%%eax)\n\t"                                     \
         "pushl 4(%%eax)\n\t"                                     \
         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
         VALGRIND_CALL_NOREDIR_EAX                                \
         "addl $48, %%esp\n"                                      \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#endif /* ARCH_x86 */

/* --------------------------- amd64 --------------------------- */

#if defined(ARCH_amd64)

/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */

/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi",       \
                            "rdi", "r8", "r9", "r10", "r11"

/* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned
   long) == 8. */

#define CALL_FN_W_v(lval, fnptr)                                  \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[1];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      __asm__ volatile(                                           \
         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
         VALGRIND_CALL_NOREDIR_RAX                                \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_W(lval, fnptr, arg1)                            \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[2];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      _argvec[1] = (unsigned long)(arg1);                         \
      __asm__ volatile(                                           \
         "movq 8(%%rax), %%rdi\n\t"                               \
         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
         VALGRIND_CALL_NOREDIR_RAX                                \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_WW(lval, fnptr, arg1,arg2)                      \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[3];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      _argvec[1] = (unsigned long)(arg1);                         \
      _argvec[2] = (unsigned long)(arg2);                         \
      __asm__ volatile(                                           \
         "movq 16(%%rax), %%rsi\n\t"                              \
         "movq 8(%%rax), %%rdi\n\t"                               \
         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
         VALGRIND_CALL_NOREDIR_RAX                                \
         : /*out*/   "=a" (_res)                                  \
         : /*in*/    "a" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#endif /* ARCH_amd64 */

/* --------------------------- ppc32 --------------------------- */

#if defined(ARCH_ppc32)

/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */

/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS "lr",                                 \
                            "r0", "r2", "r3", "r4", "r5", "r6",   \
                            "r7", "r8", "r9", "r10", "r11", "r12"

/* These CALL_FN_ macros assume that on ppc32-linux, sizeof(unsigned
   long) == 4. */

#define CALL_FN_W_v(lval, fnptr)                                  \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[1];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      __asm__ volatile(                                           \
         "mr 11,%1\n\t"                                           \
         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
         "mr %0,3"                                                \
         : /*out*/   "=r" (_res)                                  \
         : /*in*/    "r" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_W(lval, fnptr, arg1)                            \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[2];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      _argvec[1] = (unsigned long)arg1;                           \
      __asm__ volatile(                                           \
         "mr 11,%1\n\t"                                           \
         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
         "mr %0,3"                                                \
         : /*out*/   "=r" (_res)                                  \
         : /*in*/    "r" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#define CALL_FN_W_WW(lval, fnptr, arg1,arg2)                      \
   do {                                                           \
      volatile void* _fnptr = (fnptr);                            \
      volatile unsigned long _argvec[3];                          \
      volatile unsigned long _res;                                \
      _argvec[0] = (unsigned long)_fnptr;                         \
      _argvec[1] = (unsigned long)arg1;                           \
      _argvec[2] = (unsigned long)arg2;                           \
      __asm__ volatile(                                           \
         "mr 11,%1\n\t"                                           \
         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
         "lwz 4,8(11)\n\t"                                        \
         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
         "mr %0,3"                                                \
         : /*out*/   "=r" (_res)                                  \
         : /*in*/    "r" (&_argvec[0])                            \
         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
      );                                                          \
      lval = (__typeof__(lval)) _res;                             \
   } while (0)

#endif /* ARCH_ppc32 */

/* --------------------------- ppc64 --------------------------- */


/* ------------------------------------------------------------------ */
/* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS.                */
/*                                                                     */
/* ------------------------------------------------------------------ */

/* Some request codes.  There are many more of these, but most are not
   exposed to end-user view.  These are the public ones, all of the
   form 0x1000 + small_number.

   Core ones are in the range 0x00000000--0x0000ffff.  The non-public
   ones start at 0x2000.
*/

/* These macros are used by tools -- they must be public, but don't
   embed them into other programs. */
#define VG_USERREQ_TOOL_BASE(a,b) \
   ((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16))
#define VG_IS_TOOL_USERREQ(a, b, v) \
   (VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000))

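/* For example, a tool which has claimed the two-character code 'F','E'
   might declare its own request numbers like this (a sketch only; the
   code letters and the request name are made up):

      #define VG_USERREQ__DO_A_FOO  (VG_USERREQ_TOOL_BASE('F','E') + 1)

   and can later test an incoming request word 'v' with
   VG_IS_TOOL_USERREQ('F','E', v). */
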
typedef
   enum { VG_USERREQ__RUNNING_ON_VALGRIND  = 0x1001,
          VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002,

          /* These allow any function to be called from the simulated
             CPU but run on the real CPU.  Nb: the first arg passed to
             the function is always the ThreadId of the running
             thread!  So CLIENT_CALL0 actually requires a 1 arg
             function, etc. */
          VG_USERREQ__CLIENT_CALL0 = 0x1101,
          VG_USERREQ__CLIENT_CALL1 = 0x1102,
          VG_USERREQ__CLIENT_CALL2 = 0x1103,
          VG_USERREQ__CLIENT_CALL3 = 0x1104,

          /* Can be useful in regression testing suites -- eg. can
             send Valgrind's output to /dev/null and still count
             errors. */
          VG_USERREQ__COUNT_ERRORS = 0x1201,

          /* These are useful and can be interpreted by any tool that
             tracks malloc() et al, by using vg_replace_malloc.c. */
          VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
          VG_USERREQ__FREELIKE_BLOCK   = 0x1302,
          /* Memory pool support. */
          VG_USERREQ__CREATE_MEMPOOL   = 0x1303,
          VG_USERREQ__DESTROY_MEMPOOL  = 0x1304,
          VG_USERREQ__MEMPOOL_ALLOC    = 0x1305,
          VG_USERREQ__MEMPOOL_FREE     = 0x1306,

          /* Allow printfs to valgrind log. */
          VG_USERREQ__PRINTF           = 0x1401,
          VG_USERREQ__PRINTF_BACKTRACE = 0x1402,

          /* Stack support. */
          VG_USERREQ__STACK_REGISTER   = 0x1501,
          VG_USERREQ__STACK_DEREGISTER = 0x1502,
          VG_USERREQ__STACK_CHANGE     = 0x1503,
   } Vg_ClientRequest;

#if !defined(__GNUC__)
#  define __extension__ /* */
#endif

/* Returns the number of Valgrinds this code is running under.  That
   is, 0 if running natively, 1 if running under Valgrind, 2 if
   running under Valgrind which is running under another Valgrind,
   etc. */
#define RUNNING_ON_VALGRIND  __extension__                        \
   ({unsigned int _qzz_res;                                       \
    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* if not */,          \
                               VG_USERREQ__RUNNING_ON_VALGRIND,   \
                               0, 0, 0, 0);                       \
    _qzz_res;                                                     \
   })

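/* Since every use of this macro issues a fresh client request, code
   that consults it frequently may prefer to cache the result, for
   example (a sketch only):

      static int under_valgrind = -1;
      if (under_valgrind == -1)
         under_valgrind = RUNNING_ON_VALGRIND;
*/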

/* Discard translation of code in the range [_qzz_addr .. _qzz_addr +
   _qzz_len - 1].  Useful if you are debugging a JITter or some such,
   since it provides a way to make sure valgrind will retranslate the
   invalidated area.  Returns no value. */
#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len)         \
   {unsigned int _qzz_res;                                        \
    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
                               VG_USERREQ__DISCARD_TRANSLATIONS,  \
                               _qzz_addr, _qzz_len, 0, 0);        \
   }

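/* For example, a JIT that has just overwritten a buffer of previously
   generated code might do this (a sketch only; 'codebuf' and 'codelen'
   are made-up names):

      regenerate_code(codebuf, &codelen);
      VALGRIND_DISCARD_TRANSLATIONS(codebuf, codelen);

   so that Valgrind forgets any translations it made of the buffer's
   old contents before the new code is run. */
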

/* These requests are for getting Valgrind itself to print something.
   Possibly with a backtrace.  This is a really ugly hack. */

#if defined(NVALGRIND)

#  define VALGRIND_PRINTF(...)
#  define VALGRIND_PRINTF_BACKTRACE(...)

#else /* NVALGRIND */

int VALGRIND_PRINTF(const char *format, ...)
   __attribute__((format(__printf__, 1, 2)));
__attribute__((weak))
int
VALGRIND_PRINTF(const char *format, ...)
{
   unsigned long _qzz_res;
   va_list vargs;
   va_start(vargs, format);
   VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, VG_USERREQ__PRINTF,
                              (unsigned long)format, (unsigned long)vargs, 0, 0);
   va_end(vargs);
   return (int)_qzz_res;
}

int VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
   __attribute__((format(__printf__, 1, 2)));
__attribute__((weak))
int
VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
{
   unsigned long _qzz_res;
   va_list vargs;
   va_start(vargs, format);
   VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, VG_USERREQ__PRINTF_BACKTRACE,
                              (unsigned long)format, (unsigned long)vargs, 0, 0);
   va_end(vargs);
   return (int)_qzz_res;
}

#endif /* NVALGRIND */
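
/* For example (a sketch only; 'new_size' is a made-up variable):

      VALGRIND_PRINTF("resizing table to %d entries\n", new_size);

   prints to the Valgrind log when running under Valgrind and, via the
   weak definitions above, does (almost) nothing when running
   natively. */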


/* These requests allow control to move from the simulated CPU to the
   real CPU, calling an arbitrary function */
#define VALGRIND_NON_SIMD_CALL0(_qyy_fn)                          \
   ({unsigned long _qyy_res;                                      \
    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
                               VG_USERREQ__CLIENT_CALL0,          \
                               _qyy_fn,                           \
                               0, 0, 0);                          \
    _qyy_res;                                                     \
   })

#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1)               \
   ({unsigned long _qyy_res;                                      \
    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
                               VG_USERREQ__CLIENT_CALL1,          \
                               _qyy_fn,                           \
                               _qyy_arg1, 0, 0);                  \
    _qyy_res;                                                     \
   })

#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2)    \
   ({unsigned long _qyy_res;                                      \
    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
                               VG_USERREQ__CLIENT_CALL2,          \
                               _qyy_fn,                           \
                               _qyy_arg1, _qyy_arg2, 0);          \
    _qyy_res;                                                     \
   })

#define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3)  \
   ({unsigned long _qyy_res;                                      \
    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
                               VG_USERREQ__CLIENT_CALL3,          \
                               _qyy_fn,                           \
                               _qyy_arg1, _qyy_arg2, _qyy_arg3);  \
    _qyy_res;                                                     \
   })

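/* For example (a sketch only; 'note_event' is a made-up client
   function), remembering that the first argument the called function
   receives is the ThreadId of the running thread:

      static long note_event ( int tid, long ev ) { return ev; }
      ...
      long r = VALGRIND_NON_SIMD_CALL1(note_event, 42);

   Note that when not running under Valgrind the function is not called
   at all and the result is just the default value, 0. */
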

/* Counts the number of errors that have been recorded by a tool.  Nb:
   the tool must record the errors with VG_(maybe_record_error)() or
   VG_(unique_error)() for them to be counted. */
#define VALGRIND_COUNT_ERRORS                                     \
   ({unsigned int _qyy_res;                                       \
    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
                               VG_USERREQ__COUNT_ERRORS,          \
                               0, 0, 0, 0);                       \
    _qyy_res;                                                     \
   })

/* Mark a block of memory as having been allocated by a malloc()-like
   function.  `addr' is the start of the usable block (ie. after any
   redzone).  `rzB' is redzone size if the allocator can apply redzones;
   use '0' if not.  Adding redzones makes it more likely Valgrind will spot
   block overruns.  `is_zeroed' indicates if the memory is zeroed, as it is
   for calloc().  Put it immediately after the point where a block is
   allocated.

   If you're allocating memory via superblocks, and then handing out small
   chunks of each superblock, if you don't have redzones on your small
   blocks, it's worth marking the superblock with VALGRIND_MAKE_NOACCESS
   when it's created, so that block overruns are detected.  But if you can
   put redzones on, it's probably better to not do this, so that messages
   for small overruns are described in terms of the small block rather than
   the superblock (but if you have a big overrun that skips over a redzone,
   you could miss an error this way).  See memcheck/tests/custom_alloc.c
   for an example.

   Nb: block must be freed via a free()-like function specified
   with VALGRIND_FREELIKE_BLOCK or mismatch errors will occur. */
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)    \
   {unsigned int _qzz_res;                                        \
    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
                               VG_USERREQ__MALLOCLIKE_BLOCK,      \
                               addr, sizeB, rzB, is_zeroed);      \
   }

/* Mark a block of memory as having been freed by a free()-like function.
   `rzB' is redzone size; it must match that given to
   VALGRIND_MALLOCLIKE_BLOCK.  Memory not freed will be detected by the leak
   checker.  Put it immediately after the point where the block is freed. */
#define VALGRIND_FREELIKE_BLOCK(addr, rzB)                        \
   {unsigned int _qzz_res;                                        \
    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
                               VG_USERREQ__FREELIKE_BLOCK,        \
                               addr, rzB, 0, 0);                  \
   }

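/* For example, an allocator that carves client blocks out of a
   superblock might do something like this (a sketch only; 'pool_alloc',
   'pool_free' and 'REDZONE' are made-up names for your allocator's
   internals):

      char* p = (char*)pool_alloc(sz + 2*REDZONE) + REDZONE;
      VALGRIND_MALLOCLIKE_BLOCK(p, sz, REDZONE, 0);
      ...
      VALGRIND_FREELIKE_BLOCK(p, REDZONE);
      pool_free(p - REDZONE);
*/
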
/* Create a memory pool. */
#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed)             \
   {unsigned int _qzz_res;                                        \
    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
                               VG_USERREQ__CREATE_MEMPOOL,        \
                               pool, rzB, is_zeroed, 0);          \
   }

/* Destroy a memory pool. */
#define VALGRIND_DESTROY_MEMPOOL(pool)                            \
   {unsigned int _qzz_res;                                        \
    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
                               VG_USERREQ__DESTROY_MEMPOOL,       \
                               pool, 0, 0, 0);                    \
   }

/* Associate a piece of memory with a memory pool. */
#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size)                  \
   {unsigned int _qzz_res;                                        \
    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
                               VG_USERREQ__MEMPOOL_ALLOC,         \
                               pool, addr, size, 0);              \
   }

/* Disassociate a piece of memory from a memory pool. */
#define VALGRIND_MEMPOOL_FREE(pool, addr)                         \
   {unsigned int _qzz_res;                                        \
    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
                               VG_USERREQ__MEMPOOL_FREE,          \
                               pool, addr, 0, 0);                 \
   }

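/* For example, an arena-style allocator might drive these as follows
   (a sketch only; 'arena' and 'arena_reserve' are made-up names):

      VALGRIND_CREATE_MEMPOOL(arena, 0, 0);
      p = arena_reserve(arena, size);
      VALGRIND_MEMPOOL_ALLOC(arena, p, size);
      ...
      VALGRIND_MEMPOOL_FREE(arena, p);
      VALGRIND_DESTROY_MEMPOOL(arena);
*/
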
/* Mark a piece of memory as being a stack.  Returns a stack id. */
#define VALGRIND_STACK_REGISTER(start, end)                       \
   ({unsigned int _qzz_res;                                       \
    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
                               VG_USERREQ__STACK_REGISTER,        \
                               start, end, 0, 0);                 \
    _qzz_res;                                                     \
   })

/* Unmark the piece of memory associated with a stack id as being a
   stack. */
#define VALGRIND_STACK_DEREGISTER(id)                             \
   {unsigned int _qzz_res;                                        \
    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
                               VG_USERREQ__STACK_DEREGISTER,      \
                               id, 0, 0, 0);                      \
   }

/* Change the start and end address of the stack id. */
#define VALGRIND_STACK_CHANGE(id, start, end)                     \
   {unsigned int _qzz_res;                                        \
    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
                               VG_USERREQ__STACK_CHANGE,          \
                               id, start, end, 0);                \
   }

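/* For example, code that allocates its own stacks (say, for use with
   makecontext/swapcontext) could register them like this (a sketch
   only; 'stk' and 'STK_SIZE' are made-up names):

      char* stk = malloc(STK_SIZE);
      unsigned int stk_id = VALGRIND_STACK_REGISTER(stk, stk + STK_SIZE);
      ...
      VALGRIND_STACK_DEREGISTER(stk_id);
      free(stk);
*/
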

#undef ARCH_x86
#undef ARCH_amd64
#undef ARCH_ppc32
#undef ARCH_ppc64

#endif   /* __VALGRIND_H */