blob: 3a79a21a91209aa7f040285aa9c7dae2552b4712 [file] [log] [blame]
njn3e884182003-04-15 13:03:23 +00001
2/*--------------------------------------------------------------------*/
3/*--- Replacements for malloc() et al, which run on the simulated ---*/
4/*--- CPU. vg_replace_malloc.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
8 This file is part of Valgrind, an extensible x86 protected-mode
9 emulator for monitoring program execution on x86-Unixes.
10
njn0e1b5142003-04-15 14:58:06 +000011 Copyright (C) 2000-2003 Julian Seward
njn3e884182003-04-15 13:03:23 +000012 jseward@acm.org
13
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 02111-1307, USA.
28
29 The GNU General Public License is contained in the file COPYING.
30*/
31
32/* ---------------------------------------------------------------------
33 All the code in this file runs on the SIMULATED CPU. It is
34 intended for various reasons as drop-in replacements for malloc()
35 and friends. These functions have global visibility (obviously) and
36 have no prototypes in vg_include.h, since they are not intended to
37 be called from within Valgrind.
38
39 This file can be #included into a skin that wishes to know about
40 calls to malloc(). It should define functions SK_(malloc) et al
41 that will be called.
42 ------------------------------------------------------------------ */
43
njn72718642003-07-24 08:45:32 +000044#include "valgrind.h" /* for VALGRIND_NON_SIMD_CALL[12] */
nethercote851b0f62003-11-13 23:02:16 +000045#include "vg_include.h"
njn3e884182003-04-15 13:03:23 +000046
47/*------------------------------------------------------------*/
48/*--- Command line options ---*/
49/*------------------------------------------------------------*/
50
51/* Round malloc sizes upwards to integral number of words? default: NO */
52Bool VG_(clo_sloppy_malloc) = False;
53
54/* DEBUG: print malloc details? default: NO */
55Bool VG_(clo_trace_malloc) = False;
56
57/* Minimum alignment in functions that don't specify alignment explicitly.
58 default: 0, i.e. use default of the machine (== 4) */
59Int VG_(clo_alignment) = 4;
60
61
62Bool VG_(replacement_malloc_process_cmd_line_option)(Char* arg)
63{
64 if (VG_CLO_STREQN(12, arg, "--alignment=")) {
65 VG_(clo_alignment) = (Int)VG_(atoll)(&arg[12]);
66
67 if (VG_(clo_alignment) < 4
68 || VG_(clo_alignment) > 4096
69 || VG_(log2)( VG_(clo_alignment) ) == -1 /* not a power of 2 */) {
70 VG_(message)(Vg_UserMsg, "");
71 VG_(message)(Vg_UserMsg,
72 "Invalid --alignment= setting. "
73 "Should be a power of 2, >= 4, <= 4096.");
74 VG_(bad_option)("--alignment");
75 }
76 }
77
78 else if (VG_CLO_STREQ(arg, "--sloppy-malloc=yes"))
79 VG_(clo_sloppy_malloc) = True;
80 else if (VG_CLO_STREQ(arg, "--sloppy-malloc=no"))
81 VG_(clo_sloppy_malloc) = False;
82
83 else if (VG_CLO_STREQ(arg, "--trace-malloc=yes"))
84 VG_(clo_trace_malloc) = True;
85 else if (VG_CLO_STREQ(arg, "--trace-malloc=no"))
86 VG_(clo_trace_malloc) = False;
87
88 else
89 return False;
90
91 return True;
92}
93
94void VG_(replacement_malloc_print_usage)(void)
95{
96 VG_(printf)(
97" --sloppy-malloc=no|yes round malloc sizes to next word? [no]\n"
98" --alignment=<number> set minimum alignment of allocations [4]\n"
99 );
100}
101
102void VG_(replacement_malloc_print_debug_usage)(void)
103{
104 VG_(printf)(
105" --trace-malloc=no|yes show client malloc details? [no]\n"
106 );
107}
108
109
110/*------------------------------------------------------------*/
111/*--- Replacing malloc() et al ---*/
112/*------------------------------------------------------------*/
113
114/* Below are new versions of malloc, __builtin_new, free,
115 __builtin_delete, calloc, realloc, memalign, and friends.
116
117 malloc, __builtin_new, free, __builtin_delete, calloc and realloc
118 can be entered either on the real CPU or the simulated one. If on
119 the real one, this is because the dynamic linker is running the
120 static initialisers for C++, before starting up Valgrind itself.
121 In this case it is safe to route calls through to
122 VG_(arena_malloc)/VG_(arena_free), since they are self-initialising.
123
124 Once Valgrind is initialised, vg_running_on_simd_CPU becomes True.
125 The call needs to be transferred from the simulated CPU back to the
126 real one and routed to the VG_(cli_malloc)() or VG_(cli_free)(). To do
127 that, the client-request mechanism (in valgrind.h) is used to convey
128 requests to the scheduler.
129*/
130
/* Log one allocator event, but only when --trace-malloc=yes.
   Wrapped in do..while(0) so the expansion is a single statement:
   the original bare-'if' form could mis-bind to a following 'else'
   (dangling-else hazard) at a use site. */
#define MALLOC_TRACE(format, args...)                   \
   do {                                                 \
      if (VG_(clo_trace_malloc))                        \
         VALGRIND_INTERNAL_PRINTF(format, ## args );    \
   } while (0)

/* Implement --sloppy-malloc: round n up to the next multiple of 4.
   Same do..while(0) hygiene as above. */
#define MAYBE_SLOPPIFY(n)                               \
   do {                                                 \
      if (VG_(clo_sloppy_malloc)) {                     \
         while ((n % 4) > 0) n++;                       \
      }                                                 \
   } while (0)
139
njnd0eab5f2003-09-30 16:52:47 +0000140/* ALL calls to malloc() and friends wind up here. */
141#define ALLOC(fff, vgfff) \
142void* fff ( Int n ) \
143{ \
144 void* v; \
145 \
146 MALLOC_TRACE(#fff "[simd=%d](%d)", \
147 (UInt)VG_(is_running_on_simd_CPU)(), n ); \
148 MAYBE_SLOPPIFY(n); \
149 \
150 if (VG_(is_running_on_simd_CPU)()) { \
151 v = (void*)VALGRIND_NON_SIMD_CALL1( vgfff, n ); \
152 } else if (VG_(clo_alignment) != 4) { \
153 v = VG_(arena_malloc_aligned)(VG_AR_CLIENT, VG_(clo_alignment), n); \
154 } else { \
155 v = VG_(arena_malloc)(VG_AR_CLIENT, n); \
156 } \
fitzhardinge7fae3e02003-10-31 07:13:41 +0000157 MALLOC_TRACE(" = %p", v ); \
njnd0eab5f2003-09-30 16:52:47 +0000158 return v; \
njn3e884182003-04-15 13:03:23 +0000159}
njn5cebf572003-10-09 15:40:38 +0000160ALLOC( malloc, SK_(malloc) );
161ALLOC( __builtin_new, SK_(__builtin_new) );
162ALLOC( _Znwj, SK_(__builtin_new) );
163
164// operator new(unsigned, std::nothrow_t const&)
165ALLOC( _ZnwjRKSt9nothrow_t, SK_(__builtin_new) );
166
167ALLOC( __builtin_vec_new, SK_(__builtin_vec_new) );
168ALLOC( _Znaj, SK_(__builtin_vec_new) );
169
170// operator new[](unsigned, std::nothrow_t const&
171ALLOC( _ZnajRKSt9nothrow_t, SK_(__builtin_vec_new) );
njn3e884182003-04-15 13:03:23 +0000172
njnd0eab5f2003-09-30 16:52:47 +0000173#define FREE(fff, vgfff) \
174void fff ( void* p ) \
175{ \
fitzhardinge7fae3e02003-10-31 07:13:41 +0000176 MALLOC_TRACE(#fff "[simd=%d](%p)", \
njnd0eab5f2003-09-30 16:52:47 +0000177 (UInt)VG_(is_running_on_simd_CPU)(), p ); \
178 if (p == NULL) \
179 return; \
180 if (VG_(is_running_on_simd_CPU)()) { \
181 (void)VALGRIND_NON_SIMD_CALL1( vgfff, p ); \
182 } else { \
183 VG_(arena_free)(VG_AR_CLIENT, p); \
184 } \
njn3e884182003-04-15 13:03:23 +0000185}
njnd0eab5f2003-09-30 16:52:47 +0000186FREE( free, SK_(free) );
187FREE( __builtin_delete, SK_(__builtin_delete) );
188FREE( _ZdlPv, SK_(__builtin_delete) );
189FREE( __builtin_vec_delete, SK_(__builtin_vec_delete) );
190FREE( _ZdaPv, SK_(__builtin_vec_delete) );
njn3e884182003-04-15 13:03:23 +0000191
daywalker7700d682003-05-27 00:18:49 +0000192void* calloc ( UInt nmemb, UInt size )
njn3e884182003-04-15 13:03:23 +0000193{
194 void* v;
195
196 MALLOC_TRACE("calloc[simd=%d](%d,%d)",
197 (UInt)VG_(is_running_on_simd_CPU)(), nmemb, size );
198 MAYBE_SLOPPIFY(size);
199
200 if (VG_(is_running_on_simd_CPU)()) {
njn72718642003-07-24 08:45:32 +0000201 v = (void*)VALGRIND_NON_SIMD_CALL2( SK_(calloc), nmemb, size );
njn3e884182003-04-15 13:03:23 +0000202 } else {
203 v = VG_(arena_calloc)(VG_AR_CLIENT, VG_(clo_alignment), nmemb, size);
204 }
fitzhardinge7fae3e02003-10-31 07:13:41 +0000205 MALLOC_TRACE(" = %p", v );
njn3e884182003-04-15 13:03:23 +0000206 return v;
207}
208
209
210void* realloc ( void* ptrV, Int new_size )
211{
212 void* v;
213
214 MALLOC_TRACE("realloc[simd=%d](%p,%d)",
215 (UInt)VG_(is_running_on_simd_CPU)(), ptrV, new_size );
216 MAYBE_SLOPPIFY(new_size);
217
218 if (ptrV == NULL)
219 return malloc(new_size);
220 if (new_size <= 0) {
221 free(ptrV);
222 if (VG_(clo_trace_malloc))
fitzhardinge7fae3e02003-10-31 07:13:41 +0000223 VG_(printf)(" = 0" );
njn3e884182003-04-15 13:03:23 +0000224 return NULL;
225 }
226 if (VG_(is_running_on_simd_CPU)()) {
njn72718642003-07-24 08:45:32 +0000227 v = (void*)VALGRIND_NON_SIMD_CALL2( SK_(realloc), ptrV, new_size );
njn3e884182003-04-15 13:03:23 +0000228 } else {
229 v = VG_(arena_realloc)(VG_AR_CLIENT, ptrV, VG_(clo_alignment), new_size);
230 }
fitzhardinge7fae3e02003-10-31 07:13:41 +0000231 MALLOC_TRACE(" = %p", v );
njn3e884182003-04-15 13:03:23 +0000232 return v;
233}
234
235
236void* memalign ( Int alignment, Int n )
237{
238 void* v;
239
240 MALLOC_TRACE("memalign[simd=%d](al %d, size %d)",
241 (UInt)VG_(is_running_on_simd_CPU)(), alignment, n );
242 MAYBE_SLOPPIFY(n);
243
244 if (VG_(is_running_on_simd_CPU)()) {
njn72718642003-07-24 08:45:32 +0000245 v = (void*)VALGRIND_NON_SIMD_CALL2( SK_(memalign), alignment, n );
njn3e884182003-04-15 13:03:23 +0000246 } else {
247 v = VG_(arena_malloc_aligned)(VG_AR_CLIENT, alignment, n);
248 }
fitzhardinge7fae3e02003-10-31 07:13:41 +0000249 MALLOC_TRACE(" = %p", v );
njn3e884182003-04-15 13:03:23 +0000250 return v;
251}
252
253
254void* valloc ( Int size )
255{
256 return memalign(VKI_BYTES_PER_PAGE, size);
257}
258
259
/* Various compatibility wrapper functions, for glibc and libstdc++. */

/* Old SVID/glibc name: cfree() is just free(). */
void cfree ( void* p )
{
   free ( p );
}
265
266
/* Replacement for mallopt().  We have no tunables to adjust, so every
   request is reported as having succeeded. */
int mallopt ( int cmd, int value )
{
   /* In glibc-2.2.4, 1 denotes a successful return value for mallopt */
   return 1;
}
272
273
274int __posix_memalign ( void **memptr, UInt alignment, UInt size )
275{
276 void *mem;
277
mueller211d05d2003-11-28 00:15:57 +0000278 /* Test whether the alignment argument is valid. It must be a power of
njn3e884182003-04-15 13:03:23 +0000279 two multiple of sizeof (void *). */
mueller211d05d2003-11-28 00:15:57 +0000280 if (alignment % sizeof (void *) != 0 || (alignment & (alignment - 1)) != 0)
njn3e884182003-04-15 13:03:23 +0000281 return VKI_EINVAL /*22*/ /*EINVAL*/;
282
283 mem = memalign (alignment, size);
284
285 if (mem != NULL) {
286 *memptr = mem;
287 return 0;
288 }
289
290 return VKI_ENOMEM /*12*/ /*ENOMEM*/;
291}
292
nethercote71110aa2003-11-15 19:24:08 +0000293# define weak_alias(name, aliasname) \
294 extern __typeof (name) aliasname __attribute__ ((weak, alias (#name)));
295weak_alias(__posix_memalign, posix_memalign);
296
njn8a6b6c02003-04-22 22:45:55 +0000297Int malloc_usable_size ( void* p )
298{
299 Int pszB;
300
301 MALLOC_TRACE("malloc_usable_size[simd=%d](%p)",
302 (UInt)VG_(is_running_on_simd_CPU)(), p );
303 if (NULL == p)
304 return 0;
305
306 if (VG_(is_running_on_simd_CPU)()) {
307 pszB = (Int)VALGRIND_NON_SIMD_CALL2( VG_(arena_payload_szB),
308 VG_AR_CLIENT, p );
309 } else {
310 pszB = VG_(arena_payload_szB)(VG_AR_CLIENT, p);
311 }
fitzhardinge7fae3e02003-10-31 07:13:41 +0000312 MALLOC_TRACE(" = %d", pszB );
njn8a6b6c02003-04-22 22:45:55 +0000313
314 return pszB;
315}
316
njn3e884182003-04-15 13:03:23 +0000317
318/* Bomb out if we get any of these. */
319/* HACK: We shouldn't call VG_(core_panic) or VG_(message) on the simulated
320 CPU. Really we should pass the request in the usual way, and
321 Valgrind itself can do the panic. Too tedious, however.
322*/
323void pvalloc ( void )
324{ VG_(core_panic)("call to pvalloc\n"); }
325void malloc_stats ( void )
326{ VG_(core_panic)("call to malloc_stats\n"); }
njn8a6b6c02003-04-22 22:45:55 +0000327
njn3e884182003-04-15 13:03:23 +0000328void malloc_trim ( void )
329{ VG_(core_panic)("call to malloc_trim\n"); }
330void malloc_get_state ( void )
331{ VG_(core_panic)("call to malloc_get_state\n"); }
332void malloc_set_state ( void )
333{ VG_(core_panic)("call to malloc_set_state\n"); }
334
335
336/* Yet another ugly hack. Cannot include <malloc.h> because we
337 implement functions implemented there with different signatures.
338 This struct definition MUST match the system one. */
339
340/* SVID2/XPG mallinfo structure */
341struct mallinfo {
342 int arena; /* total space allocated from system */
343 int ordblks; /* number of non-inuse chunks */
344 int smblks; /* unused -- always zero */
345 int hblks; /* number of mmapped regions */
346 int hblkhd; /* total space in mmapped regions */
347 int usmblks; /* unused -- always zero */
348 int fsmblks; /* unused -- always zero */
349 int uordblks; /* total allocated space */
350 int fordblks; /* total non-inuse space */
351 int keepcost; /* top-most, releasable (via malloc_trim) space */
352};
353
354struct mallinfo mallinfo ( void )
355{
356 /* Should really try to return something a bit more meaningful */
sewardj05bcdcb2003-05-18 10:05:38 +0000357 UInt i;
njn3e884182003-04-15 13:03:23 +0000358 struct mallinfo mi;
359 UChar* pmi = (UChar*)(&mi);
360 for (i = 0; i < sizeof(mi); i++)
361 pmi[i] = 0;
362 return mi;
363}
364
365/*--------------------------------------------------------------------*/
366/*--- end vg_replace_malloc.c ---*/
367/*--------------------------------------------------------------------*/