blob: 9c3ca2560e3474841cfe67c270c7819e4189d5d7 [file] [log] [blame]
njn3e884182003-04-15 13:03:23 +00001
2/*--------------------------------------------------------------------*/
3/*--- Replacements for malloc() et al, which run on the simulated ---*/
4/*--- CPU. vg_replace_malloc.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
8 This file is part of Valgrind, an extensible x86 protected-mode
9 emulator for monitoring program execution on x86-Unixes.
10
njn0e1b5142003-04-15 14:58:06 +000011 Copyright (C) 2000-2003 Julian Seward
njn3e884182003-04-15 13:03:23 +000012 jseward@acm.org
13
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 02111-1307, USA.
28
29 The GNU General Public License is contained in the file COPYING.
30*/
31
32/* ---------------------------------------------------------------------
33 All the code in this file runs on the SIMULATED CPU. It is
34 intended for various reasons as drop-in replacements for malloc()
35 and friends. These functions have global visibility (obviously) and
36 have no prototypes in vg_include.h, since they are not intended to
37 be called from within Valgrind.
38
39 This file can be #included into a skin that wishes to know about
40 calls to malloc(). It should define functions SK_(malloc) et al
41 that will be called.
42 ------------------------------------------------------------------ */
43
44#include "vg_include.h"
45
46/* Sidestep the normal check which disallows using valgrind.h directly. */
47#define __VALGRIND_SOMESKIN_H
njn72718642003-07-24 08:45:32 +000048#include "valgrind.h" /* for VALGRIND_NON_SIMD_CALL[12] */
njn3e884182003-04-15 13:03:23 +000049
50/*------------------------------------------------------------*/
51/*--- Command line options ---*/
52/*------------------------------------------------------------*/
53
/* Round malloc sizes upwards to integral number of words? default: NO
   (set by --sloppy-malloc=yes|no). */
Bool VG_(clo_sloppy_malloc) = False;

/* DEBUG: print malloc details? default: NO
   (set by --trace-malloc=yes|no). */
Bool VG_(clo_trace_malloc) = False;

/* Minimum alignment in functions that don't specify alignment explicitly.
   default: 4, the machine's natural word alignment
   (overridable with --alignment=, validated to be a power of 2 in
   [4 .. 4096] by the option parser below). */
Int VG_(clo_alignment) = 4;
63
64
65Bool VG_(replacement_malloc_process_cmd_line_option)(Char* arg)
66{
67 if (VG_CLO_STREQN(12, arg, "--alignment=")) {
68 VG_(clo_alignment) = (Int)VG_(atoll)(&arg[12]);
69
70 if (VG_(clo_alignment) < 4
71 || VG_(clo_alignment) > 4096
72 || VG_(log2)( VG_(clo_alignment) ) == -1 /* not a power of 2 */) {
73 VG_(message)(Vg_UserMsg, "");
74 VG_(message)(Vg_UserMsg,
75 "Invalid --alignment= setting. "
76 "Should be a power of 2, >= 4, <= 4096.");
77 VG_(bad_option)("--alignment");
78 }
79 }
80
81 else if (VG_CLO_STREQ(arg, "--sloppy-malloc=yes"))
82 VG_(clo_sloppy_malloc) = True;
83 else if (VG_CLO_STREQ(arg, "--sloppy-malloc=no"))
84 VG_(clo_sloppy_malloc) = False;
85
86 else if (VG_CLO_STREQ(arg, "--trace-malloc=yes"))
87 VG_(clo_trace_malloc) = True;
88 else if (VG_CLO_STREQ(arg, "--trace-malloc=no"))
89 VG_(clo_trace_malloc) = False;
90
91 else
92 return False;
93
94 return True;
95}
96
/* Print the user-visible command-line options handled by this module;
   called as part of Valgrind's --help output. */
void VG_(replacement_malloc_print_usage)(void)
{
   VG_(printf)(
" --sloppy-malloc=no|yes round malloc sizes to next word? [no]\n"
" --alignment=<number> set minimum alignment of allocations [4]\n"
   );
}
104
/* Print the debugging-only command-line options handled by this module;
   called as part of Valgrind's --help-debug output. */
void VG_(replacement_malloc_print_debug_usage)(void)
{
   VG_(printf)(
" --trace-malloc=no|yes show client malloc details? [no]\n"
   );
}
111
112
113/*------------------------------------------------------------*/
114/*--- Replacing malloc() et al ---*/
115/*------------------------------------------------------------*/
116
117/* Below are new versions of malloc, __builtin_new, free,
118 __builtin_delete, calloc, realloc, memalign, and friends.
119
120 malloc, __builtin_new, free, __builtin_delete, calloc and realloc
121 can be entered either on the real CPU or the simulated one. If on
122 the real one, this is because the dynamic linker is running the
123 static initialisers for C++, before starting up Valgrind itself.
124 In this case it is safe to route calls through to
125 VG_(arena_malloc)/VG_(arena_free), since they are self-initialising.
126
127 Once Valgrind is initialised, vg_running_on_simd_CPU becomes True.
128 The call needs to be transferred from the simulated CPU back to the
129 real one and routed to the VG_(cli_malloc)() or VG_(cli_free)(). To do
130 that, the client-request mechanism (in valgrind.h) is used to convey
131 requests to the scheduler.
132*/
133
/* Trace a malloc-family event if --trace-malloc=yes.
   Wrapped in do { } while (0) so the macro expands to a single
   statement: the bare `if' form of the original could capture a
   following `else' (dangling-else hazard) when used unbraced. */
#define MALLOC_TRACE(format, args...)                       \
   do {                                                     \
      if (VG_(clo_trace_malloc))                            \
         VG_(printf)(format, ## args );                     \
   } while (0)

/* If --sloppy-malloc=yes, round the request size n up to the next
   multiple of 4 bytes.  n is modified in place, so the argument must
   be an lvalue.  Wrapped in do { } while (0) for the same reason as
   MALLOC_TRACE above. */
#define MAYBE_SLOPPIFY(n)                                   \
   do {                                                     \
      if (VG_(clo_sloppy_malloc)) {                         \
         while ((n % 4) > 0) n++;                           \
      }                                                     \
   } while (0)
/* ALL calls to malloc() and friends wind up here. */
/* Generates a replacement allocation function named `fff' that takes a
   byte count and returns a fresh block (or NULL).  If we are on the
   simulated CPU, the request is shipped back to the real CPU and routed
   to the skin's `vgfff' via the client-request mechanism; otherwise
   (during early startup, e.g. C++ static initialisers run by the
   dynamic linker) it is served directly from the CLIENT arena,
   honouring --alignment if it is non-default. */
#define ALLOC(fff, vgfff) \
void* fff ( Int n ) \
{ \
   void* v; \
 \
   MALLOC_TRACE(#fff "[simd=%d](%d)", \
                (UInt)VG_(is_running_on_simd_CPU)(), n ); \
   MAYBE_SLOPPIFY(n); \
 \
   if (VG_(is_running_on_simd_CPU)()) { \
      v = (void*)VALGRIND_NON_SIMD_CALL1( vgfff, n ); \
   } else if (VG_(clo_alignment) != 4) { \
      v = VG_(arena_malloc_aligned)(VG_AR_CLIENT, VG_(clo_alignment), n); \
   } else { \
      v = VG_(arena_malloc)(VG_AR_CLIENT, n); \
   } \
   MALLOC_TRACE(" = %p\n", v ); \
   return v; \
}
/* _Znwj / _Znaj are the Itanium-ABI mangled names for operator new
   and operator new[] taking an unsigned int. */
ALLOC( malloc, SK_(malloc) );
ALLOC( __builtin_new, SK_(__builtin_new) );
ALLOC( _Znwj, SK_(__builtin_new) );
ALLOC( __builtin_vec_new, SK_(__builtin_vec_new) );
ALLOC( _Znaj, SK_(__builtin_vec_new) );
njn3e884182003-04-15 13:03:23 +0000168
/* Generates a replacement deallocation function named `fff'.  Freeing
   NULL is a no-op, as required by the C standard.  On the simulated
   CPU the request is routed to the skin's `vgfff'; otherwise the block
   is returned directly to the CLIENT arena. */
#define FREE(fff, vgfff) \
void fff ( void* p ) \
{ \
   MALLOC_TRACE(#fff "[simd=%d](%p)\n", \
                (UInt)VG_(is_running_on_simd_CPU)(), p ); \
   if (p == NULL) \
      return; \
   if (VG_(is_running_on_simd_CPU)()) { \
      (void)VALGRIND_NON_SIMD_CALL1( vgfff, p ); \
   } else { \
      VG_(arena_free)(VG_AR_CLIENT, p); \
   } \
}
/* _ZdlPv / _ZdaPv are the Itanium-ABI mangled names for operator
   delete and operator delete[] taking a void*. */
FREE( free, SK_(free) );
FREE( __builtin_delete, SK_(__builtin_delete) );
FREE( _ZdlPv, SK_(__builtin_delete) );
FREE( __builtin_vec_delete, SK_(__builtin_vec_delete) );
FREE( _ZdaPv, SK_(__builtin_vec_delete) );
njn3e884182003-04-15 13:03:23 +0000187
daywalker7700d682003-05-27 00:18:49 +0000188void* calloc ( UInt nmemb, UInt size )
njn3e884182003-04-15 13:03:23 +0000189{
190 void* v;
191
192 MALLOC_TRACE("calloc[simd=%d](%d,%d)",
193 (UInt)VG_(is_running_on_simd_CPU)(), nmemb, size );
194 MAYBE_SLOPPIFY(size);
195
196 if (VG_(is_running_on_simd_CPU)()) {
njn72718642003-07-24 08:45:32 +0000197 v = (void*)VALGRIND_NON_SIMD_CALL2( SK_(calloc), nmemb, size );
njn3e884182003-04-15 13:03:23 +0000198 } else {
199 v = VG_(arena_calloc)(VG_AR_CLIENT, VG_(clo_alignment), nmemb, size);
200 }
201 MALLOC_TRACE(" = %p\n", v );
202 return v;
203}
204
205
/* Client replacement for realloc().  Semantics follow the traditional
   glibc behaviour of the time:
     - realloc(NULL, n) is equivalent to malloc(n);
     - realloc(p, n) with n <= 0 frees p and returns NULL;
     - otherwise the request goes to the skin (simulated CPU) or
       directly to the CLIENT arena (early startup). */
void* realloc ( void* ptrV, Int new_size )
{
   void* v;

   MALLOC_TRACE("realloc[simd=%d](%p,%d)",
                (UInt)VG_(is_running_on_simd_CPU)(), ptrV, new_size );
   MAYBE_SLOPPIFY(new_size);

   if (ptrV == NULL)
      return malloc(new_size);
   if (new_size <= 0) {
      free(ptrV);
      /* Trace manually here rather than via MALLOC_TRACE, to match the
         " = 0" output format used for this path. */
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = 0\n" );
      return NULL;
   }
   if (VG_(is_running_on_simd_CPU)()) {
      v = (void*)VALGRIND_NON_SIMD_CALL2( SK_(realloc), ptrV, new_size );
   } else {
      v = VG_(arena_realloc)(VG_AR_CLIENT, ptrV, VG_(clo_alignment), new_size);
   }
   MALLOC_TRACE(" = %p\n", v );
   return v;
}
230
231
232void* memalign ( Int alignment, Int n )
233{
234 void* v;
235
236 MALLOC_TRACE("memalign[simd=%d](al %d, size %d)",
237 (UInt)VG_(is_running_on_simd_CPU)(), alignment, n );
238 MAYBE_SLOPPIFY(n);
239
240 if (VG_(is_running_on_simd_CPU)()) {
njn72718642003-07-24 08:45:32 +0000241 v = (void*)VALGRIND_NON_SIMD_CALL2( SK_(memalign), alignment, n );
njn3e884182003-04-15 13:03:23 +0000242 } else {
243 v = VG_(arena_malloc_aligned)(VG_AR_CLIENT, alignment, n);
244 }
245 MALLOC_TRACE(" = %p\n", v );
246 return v;
247}
248
249
/* Client replacement for valloc(): allocate `size' bytes aligned on a
   page boundary (VKI_BYTES_PER_PAGE), implemented via memalign. */
void* valloc ( Int size )
{
   return memalign(VKI_BYTES_PER_PAGE, size);
}
254
255
/* Various compatibility wrapper functions, for glibc and libstdc++. */

/* glibc's cfree() is simply an alias for free(). */
void cfree ( void* p )
{
   free ( p );
}
261
262
/* Compatibility stub for glibc's mallopt().  Valgrind's allocator has
   none of glibc's tunables, so every request is simply reported as
   successful; both arguments are ignored. */
int mallopt ( int cmd, int value )
{
   /* In glibc-2.2.4, 1 denotes a successful return value for mallopt */
   (void)cmd;
   (void)value;
   return 1;
}
268
269
270int __posix_memalign ( void **memptr, UInt alignment, UInt size )
271{
272 void *mem;
273
274 /* Test whether the SIZE argument is valid. It must be a power of
275 two multiple of sizeof (void *). */
276 if (size % sizeof (void *) != 0 || (size & (size - 1)) != 0)
277 return VKI_EINVAL /*22*/ /*EINVAL*/;
278
279 mem = memalign (alignment, size);
280
281 if (mem != NULL) {
282 *memptr = mem;
283 return 0;
284 }
285
286 return VKI_ENOMEM /*12*/ /*ENOMEM*/;
287}
288
njn8a6b6c02003-04-22 22:45:55 +0000289Int malloc_usable_size ( void* p )
290{
291 Int pszB;
292
293 MALLOC_TRACE("malloc_usable_size[simd=%d](%p)",
294 (UInt)VG_(is_running_on_simd_CPU)(), p );
295 if (NULL == p)
296 return 0;
297
298 if (VG_(is_running_on_simd_CPU)()) {
299 pszB = (Int)VALGRIND_NON_SIMD_CALL2( VG_(arena_payload_szB),
300 VG_AR_CLIENT, p );
301 } else {
302 pszB = VG_(arena_payload_szB)(VG_AR_CLIENT, p);
303 }
304 MALLOC_TRACE(" = %d\n", pszB );
305
306 return pszB;
307}
308
njn3e884182003-04-15 13:03:23 +0000309
/* Bomb out if we get any of these. */
/* HACK: We shouldn't call VG_(core_panic) or VG_(message) on the simulated
   CPU.  Really we should pass the request in the usual way, and
   Valgrind itself can do the panic.  Too tedious, however.
*/
/* NOTE: the signatures below deliberately do not match the libc
   prototypes; since every one of these unconditionally panics, the
   arguments and return values are never used. */
void pvalloc ( void )
{ VG_(core_panic)("call to pvalloc\n"); }
void malloc_stats ( void )
{ VG_(core_panic)("call to malloc_stats\n"); }

void malloc_trim ( void )
{ VG_(core_panic)("call to malloc_trim\n"); }
void malloc_get_state ( void )
{ VG_(core_panic)("call to malloc_get_state\n"); }
void malloc_set_state ( void )
{ VG_(core_panic)("call to malloc_set_state\n"); }
326
327
/* Yet another ugly hack. Cannot include <malloc.h> because we
   implement functions implemented there with different signatures.
   This struct definition MUST match the system one. */

/* SVID2/XPG mallinfo structure */
struct mallinfo {
   int arena; /* total space allocated from system */
   int ordblks; /* number of non-inuse chunks */
   int smblks; /* unused -- always zero */
   int hblks; /* number of mmapped regions */
   int hblkhd; /* total space in mmapped regions */
   int usmblks; /* unused -- always zero */
   int fsmblks; /* unused -- always zero */
   int uordblks; /* total allocated space */
   int fordblks; /* total non-inuse space */
   int keepcost; /* top-most, releasable (via malloc_trim) space */
};

/* Compatibility stub: Valgrind keeps none of these statistics, so hand
   back a mallinfo record with every field zeroed. */
struct mallinfo mallinfo ( void )
{
   /* Should really try to return something a bit more meaningful */
   struct mallinfo mi = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
   return mi;
}
356
357/*--------------------------------------------------------------------*/
358/*--- end vg_replace_malloc.c ---*/
359/*--------------------------------------------------------------------*/