blob: a1162eb2425dd7ab395ead2c332ea178955cbe24 [file] [log] [blame]
njn3e884182003-04-15 13:03:23 +00001
2/*--------------------------------------------------------------------*/
3/*--- Replacements for malloc() et al, which run on the simulated ---*/
4/*--- CPU. vg_replace_malloc.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
8 This file is part of Valgrind, an extensible x86 protected-mode
9 emulator for monitoring program execution on x86-Unixes.
10
njn0e1b5142003-04-15 14:58:06 +000011 Copyright (C) 2000-2003 Julian Seward
njn3e884182003-04-15 13:03:23 +000012 jseward@acm.org
13
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 02111-1307, USA.
28
29 The GNU General Public License is contained in the file COPYING.
30*/
31
32/* ---------------------------------------------------------------------
33 All the code in this file runs on the SIMULATED CPU. It is
34 intended for various reasons as drop-in replacements for malloc()
35 and friends. These functions have global visibility (obviously) and
36 have no prototypes in vg_include.h, since they are not intended to
37 be called from within Valgrind.
38
39 This file can be #included into a skin that wishes to know about
40 calls to malloc(). It should define functions SK_(malloc) et al
41 that will be called.
42 ------------------------------------------------------------------ */
43
44#include "vg_include.h"
45
46/* Sidestep the normal check which disallows using valgrind.h directly. */
47#define __VALGRIND_SOMESKIN_H
njn72718642003-07-24 08:45:32 +000048#include "valgrind.h" /* for VALGRIND_NON_SIMD_CALL[12] */
njn3e884182003-04-15 13:03:23 +000049
50/*------------------------------------------------------------*/
51/*--- Command line options ---*/
52/*------------------------------------------------------------*/
53
/* Round malloc sizes upwards to integral number of words? default: NO */
Bool VG_(clo_sloppy_malloc) = False;

/* DEBUG: print malloc details? default: NO */
Bool VG_(clo_trace_malloc) = False;

/* Minimum alignment in functions that don't specify alignment explicitly.
   default: 0, i.e. use default of the machine (== 4).
   The option parser below constrains this to a power of two in
   [4 .. 4096]; values are set via --alignment=<n>. */
Int VG_(clo_alignment) = 4;
63
64
65Bool VG_(replacement_malloc_process_cmd_line_option)(Char* arg)
66{
67 if (VG_CLO_STREQN(12, arg, "--alignment=")) {
68 VG_(clo_alignment) = (Int)VG_(atoll)(&arg[12]);
69
70 if (VG_(clo_alignment) < 4
71 || VG_(clo_alignment) > 4096
72 || VG_(log2)( VG_(clo_alignment) ) == -1 /* not a power of 2 */) {
73 VG_(message)(Vg_UserMsg, "");
74 VG_(message)(Vg_UserMsg,
75 "Invalid --alignment= setting. "
76 "Should be a power of 2, >= 4, <= 4096.");
77 VG_(bad_option)("--alignment");
78 }
79 }
80
81 else if (VG_CLO_STREQ(arg, "--sloppy-malloc=yes"))
82 VG_(clo_sloppy_malloc) = True;
83 else if (VG_CLO_STREQ(arg, "--sloppy-malloc=no"))
84 VG_(clo_sloppy_malloc) = False;
85
86 else if (VG_CLO_STREQ(arg, "--trace-malloc=yes"))
87 VG_(clo_trace_malloc) = True;
88 else if (VG_CLO_STREQ(arg, "--trace-malloc=no"))
89 VG_(clo_trace_malloc) = False;
90
91 else
92 return False;
93
94 return True;
95}
96
/* Print the user-visible summary of the replacement-malloc options
   (shown in the core's normal --help output). */
void VG_(replacement_malloc_print_usage)(void)
{
   VG_(printf)(
" --sloppy-malloc=no|yes round malloc sizes to next word? [no]\n"
" --alignment=<number> set minimum alignment of allocations [4]\n"
   );
}
104
/* Print the debugging-only replacement-malloc options (shown in the
   core's --help-debug output). */
void VG_(replacement_malloc_print_debug_usage)(void)
{
   VG_(printf)(
" --trace-malloc=no|yes show client malloc details? [no]\n"
   );
}
111
112
113/*------------------------------------------------------------*/
114/*--- Replacing malloc() et al ---*/
115/*------------------------------------------------------------*/
116
117/* Below are new versions of malloc, __builtin_new, free,
118 __builtin_delete, calloc, realloc, memalign, and friends.
119
120 malloc, __builtin_new, free, __builtin_delete, calloc and realloc
121 can be entered either on the real CPU or the simulated one. If on
122 the real one, this is because the dynamic linker is running the
123 static initialisers for C++, before starting up Valgrind itself.
124 In this case it is safe to route calls through to
125 VG_(arena_malloc)/VG_(arena_free), since they are self-initialising.
126
127 Once Valgrind is initialised, vg_running_on_simd_CPU becomes True.
128 The call needs to be transferred from the simulated CPU back to the
129 real one and routed to the VG_(cli_malloc)() or VG_(cli_free)(). To do
130 that, the client-request mechanism (in valgrind.h) is used to convey
131 requests to the scheduler.
132*/
133
/* Emit a trace line for a malloc-family event iff --trace-malloc=yes.
   Wrapped in do { } while (0) so the macro behaves as a single
   statement; the previous bare-`if' expansion was a dangling-else /
   unbraced-if hazard at every use site. */
#define MALLOC_TRACE(format, args...) \
   do { \
      if (VG_(clo_trace_malloc)) \
         VALGRIND_INTERNAL_PRINTF(format, ## args ); \
   } while (0)

/* If --sloppy-malloc=yes, round the (lvalue) size N up to the next
   multiple of 4 in place.  Zero and negative sizes are left untouched,
   since (n % 4) > 0 is false for them.  Same do/while(0) wrapping as
   above. */
#define MAYBE_SLOPPIFY(n) \
   do { \
      if (VG_(clo_sloppy_malloc)) { \
         while ((n % 4) > 0) n++; \
      } \
   } while (0)
142
/* ALL calls to malloc() and friends wind up here. */
/* Expands to a replacement allocator named FFF taking a size N.
   If running on the simulated CPU, the request is transferred to the
   real CPU and routed to the skin function VGFFF via the
   client-request mechanism (VALGRIND_NON_SIMD_CALL1).  Otherwise --
   i.e. during early startup, before Valgrind proper is initialised --
   the request is serviced directly from the client arena, honouring
   any non-default --alignment= setting. */
#define ALLOC(fff, vgfff) \
void* fff ( Int n ) \
{ \
   void* v; \
 \
   MALLOC_TRACE(#fff "[simd=%d](%d)", \
                (UInt)VG_(is_running_on_simd_CPU)(), n ); \
   MAYBE_SLOPPIFY(n); \
 \
   if (VG_(is_running_on_simd_CPU)()) { \
      v = (void*)VALGRIND_NON_SIMD_CALL1( vgfff, n ); \
   } else if (VG_(clo_alignment) != 4) { \
      v = VG_(arena_malloc_aligned)(VG_AR_CLIENT, VG_(clo_alignment), n); \
   } else { \
      v = VG_(arena_malloc)(VG_AR_CLIENT, n); \
   } \
   MALLOC_TRACE(" = %p", v ); \
   return v; \
}
ALLOC( malloc, SK_(malloc) );
ALLOC( __builtin_new, SK_(__builtin_new) );
// operator new(unsigned) -- g++-3 mangled name
ALLOC( _Znwj, SK_(__builtin_new) );

// operator new(unsigned, std::nothrow_t const&)
ALLOC( _ZnwjRKSt9nothrow_t, SK_(__builtin_new) );

ALLOC( __builtin_vec_new, SK_(__builtin_vec_new) );
// operator new[](unsigned) -- g++-3 mangled name
ALLOC( _Znaj, SK_(__builtin_vec_new) );

// operator new[](unsigned, std::nothrow_t const&)
ALLOC( _ZnajRKSt9nothrow_t, SK_(__builtin_vec_new) );
njn3e884182003-04-15 13:03:23 +0000175
/* Expands to a replacement deallocator named FFF taking a pointer P.
   Mirrors ALLOC above: on the simulated CPU the request is routed to
   the skin function VGFFF; before Valgrind is initialised it goes
   straight to the client arena.  NULL is silently ignored (after
   tracing) without informing the skin. */
#define FREE(fff, vgfff) \
void fff ( void* p ) \
{ \
   MALLOC_TRACE(#fff "[simd=%d](%p)", \
                (UInt)VG_(is_running_on_simd_CPU)(), p ); \
   if (p == NULL) \
      return; \
   if (VG_(is_running_on_simd_CPU)()) { \
      (void)VALGRIND_NON_SIMD_CALL1( vgfff, p ); \
   } else { \
      VG_(arena_free)(VG_AR_CLIENT, p); \
   } \
}
FREE( free, SK_(free) );
FREE( __builtin_delete, SK_(__builtin_delete) );
// operator delete(void*) -- g++-3 mangled name
FREE( _ZdlPv, SK_(__builtin_delete) );
FREE( __builtin_vec_delete, SK_(__builtin_vec_delete) );
// operator delete[](void*) -- g++-3 mangled name
FREE( _ZdaPv, SK_(__builtin_vec_delete) );
njn3e884182003-04-15 13:03:23 +0000194
daywalker7700d682003-05-27 00:18:49 +0000195void* calloc ( UInt nmemb, UInt size )
njn3e884182003-04-15 13:03:23 +0000196{
197 void* v;
198
199 MALLOC_TRACE("calloc[simd=%d](%d,%d)",
200 (UInt)VG_(is_running_on_simd_CPU)(), nmemb, size );
201 MAYBE_SLOPPIFY(size);
202
203 if (VG_(is_running_on_simd_CPU)()) {
njn72718642003-07-24 08:45:32 +0000204 v = (void*)VALGRIND_NON_SIMD_CALL2( SK_(calloc), nmemb, size );
njn3e884182003-04-15 13:03:23 +0000205 } else {
206 v = VG_(arena_calloc)(VG_AR_CLIENT, VG_(clo_alignment), nmemb, size);
207 }
fitzhardinge7fae3e02003-10-31 07:13:41 +0000208 MALLOC_TRACE(" = %p", v );
njn3e884182003-04-15 13:03:23 +0000209 return v;
210}
211
212
213void* realloc ( void* ptrV, Int new_size )
214{
215 void* v;
216
217 MALLOC_TRACE("realloc[simd=%d](%p,%d)",
218 (UInt)VG_(is_running_on_simd_CPU)(), ptrV, new_size );
219 MAYBE_SLOPPIFY(new_size);
220
221 if (ptrV == NULL)
222 return malloc(new_size);
223 if (new_size <= 0) {
224 free(ptrV);
225 if (VG_(clo_trace_malloc))
fitzhardinge7fae3e02003-10-31 07:13:41 +0000226 VG_(printf)(" = 0" );
njn3e884182003-04-15 13:03:23 +0000227 return NULL;
228 }
229 if (VG_(is_running_on_simd_CPU)()) {
njn72718642003-07-24 08:45:32 +0000230 v = (void*)VALGRIND_NON_SIMD_CALL2( SK_(realloc), ptrV, new_size );
njn3e884182003-04-15 13:03:23 +0000231 } else {
232 v = VG_(arena_realloc)(VG_AR_CLIENT, ptrV, VG_(clo_alignment), new_size);
233 }
fitzhardinge7fae3e02003-10-31 07:13:41 +0000234 MALLOC_TRACE(" = %p", v );
njn3e884182003-04-15 13:03:23 +0000235 return v;
236}
237
238
239void* memalign ( Int alignment, Int n )
240{
241 void* v;
242
243 MALLOC_TRACE("memalign[simd=%d](al %d, size %d)",
244 (UInt)VG_(is_running_on_simd_CPU)(), alignment, n );
245 MAYBE_SLOPPIFY(n);
246
247 if (VG_(is_running_on_simd_CPU)()) {
njn72718642003-07-24 08:45:32 +0000248 v = (void*)VALGRIND_NON_SIMD_CALL2( SK_(memalign), alignment, n );
njn3e884182003-04-15 13:03:23 +0000249 } else {
250 v = VG_(arena_malloc_aligned)(VG_AR_CLIENT, alignment, n);
251 }
fitzhardinge7fae3e02003-10-31 07:13:41 +0000252 MALLOC_TRACE(" = %p", v );
njn3e884182003-04-15 13:03:23 +0000253 return v;
254}
255
256
/* Replacement valloc(): a page-aligned allocation, implemented as
   memalign with the kernel page size. */
void* valloc ( Int size )
{
   return memalign(VKI_BYTES_PER_PAGE, size);
}
261
262
/* Various compatibility wrapper functions, for glibc and libstdc++. */

/* cfree is an old SVID synonym for free. */
void cfree ( void* p )
{
   free ( p );
}
268
269
/* Replacement mallopt(): accept and ignore any tuning request, since
   the client heap is managed by Valgrind, not glibc.
   Returns 1 unconditionally. */
int mallopt ( int cmd, int value )
{
   /* Parameters are deliberately ignored; silence unused-parameter
      warnings. */
   (void)cmd;
   (void)value;
   /* In glibc-2.2.4, 1 denotes a successful return value for mallopt */
   return 1;
}
275
276
277int __posix_memalign ( void **memptr, UInt alignment, UInt size )
278{
279 void *mem;
280
281 /* Test whether the SIZE argument is valid. It must be a power of
282 two multiple of sizeof (void *). */
283 if (size % sizeof (void *) != 0 || (size & (size - 1)) != 0)
284 return VKI_EINVAL /*22*/ /*EINVAL*/;
285
286 mem = memalign (alignment, size);
287
288 if (mem != NULL) {
289 *memptr = mem;
290 return 0;
291 }
292
293 return VKI_ENOMEM /*12*/ /*ENOMEM*/;
294}
295
njn8a6b6c02003-04-22 22:45:55 +0000296Int malloc_usable_size ( void* p )
297{
298 Int pszB;
299
300 MALLOC_TRACE("malloc_usable_size[simd=%d](%p)",
301 (UInt)VG_(is_running_on_simd_CPU)(), p );
302 if (NULL == p)
303 return 0;
304
305 if (VG_(is_running_on_simd_CPU)()) {
306 pszB = (Int)VALGRIND_NON_SIMD_CALL2( VG_(arena_payload_szB),
307 VG_AR_CLIENT, p );
308 } else {
309 pszB = VG_(arena_payload_szB)(VG_AR_CLIENT, p);
310 }
fitzhardinge7fae3e02003-10-31 07:13:41 +0000311 MALLOC_TRACE(" = %d", pszB );
njn8a6b6c02003-04-22 22:45:55 +0000312
313 return pszB;
314}
315
njn3e884182003-04-15 13:03:23 +0000316
/* Bomb out if we get any of these. */
/* HACK: We shouldn't call VG_(core_panic) or VG_(message) on the simulated
   CPU. Really we should pass the request in the usual way, and
   Valgrind itself can do the panic. Too tedious, however.
*/
/* These glibc extensions are unimplemented; any call aborts. */
void pvalloc ( void )
{ VG_(core_panic)("call to pvalloc\n"); }
void malloc_stats ( void )
{ VG_(core_panic)("call to malloc_stats\n"); }

void malloc_trim ( void )
{ VG_(core_panic)("call to malloc_trim\n"); }
void malloc_get_state ( void )
{ VG_(core_panic)("call to malloc_get_state\n"); }
void malloc_set_state ( void )
{ VG_(core_panic)("call to malloc_set_state\n"); }
333
334
/* Yet another ugly hack. Cannot include <malloc.h> because we
   implement functions implemented there with different signatures.
   This struct definition MUST match the system one -- it is the ABI
   contract with callers of mallinfo(); do not reorder or resize
   fields. */

/* SVID2/XPG mallinfo structure */
struct mallinfo {
   int arena;    /* total space allocated from system */
   int ordblks;  /* number of non-inuse chunks */
   int smblks;   /* unused -- always zero */
   int hblks;    /* number of mmapped regions */
   int hblkhd;   /* total space in mmapped regions */
   int usmblks;  /* unused -- always zero */
   int fsmblks;  /* unused -- always zero */
   int uordblks; /* total allocated space */
   int fordblks; /* total non-inuse space */
   int keepcost; /* top-most, releasable (via malloc_trim) space */
};
352
353struct mallinfo mallinfo ( void )
354{
355 /* Should really try to return something a bit more meaningful */
sewardj05bcdcb2003-05-18 10:05:38 +0000356 UInt i;
njn3e884182003-04-15 13:03:23 +0000357 struct mallinfo mi;
358 UChar* pmi = (UChar*)(&mi);
359 for (i = 0; i < sizeof(mi); i++)
360 pmi[i] = 0;
361 return mi;
362}
363
364/*--------------------------------------------------------------------*/
365/*--- end vg_replace_malloc.c ---*/
366/*--------------------------------------------------------------------*/