/*--------------------------------------------------------------------*/
/*--- Replacements for malloc() et al, which run on the simulated  ---*/
/*--- CPU.                                     vg_replace_malloc.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, an extensible x86 protected-mode
   emulator for monitoring program execution on x86-Unixes.

   Copyright (C) 2000-2002 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

/* ---------------------------------------------------------------------
   All the code in this file runs on the SIMULATED CPU.  It provides
   drop-in replacements for malloc() and friends.  These functions
   have global visibility (obviously) and have no prototypes in
   vg_include.h, since they are not intended to be called from within
   Valgrind.

   This file can be #included into a skin that wishes to know about
   calls to malloc().  The skin should define the functions SK_(malloc)
   et al that will be called; a guarded sketch of such hooks follows
   the #includes below.
   ------------------------------------------------------------------ */

#include "vg_include.h"

/* Sidestep the normal check which disallows using valgrind.h directly. */
#define __VALGRIND_SOMESKIN_H
#include "valgrind.h"     /* for VG_NON_SIMD_tstCALL[12] */

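/* For illustration only: a minimal sketch of the SK_(malloc) and
   SK_(free) hooks a skin might supply, assuming the skin needs no
   bookkeeping of its own and that VG_(cli_malloc)/VG_(cli_free) take
   (alignment, size) and (pointer) respectively.  Real skins (e.g.
   Memcheck) also record metadata about each block.  Guarded with
   #if 0 so the sketch never affects compilation. */
#if 0
void* SK_(malloc) ( Int n )
{
   /* Hand the request straight to the core's client-arena allocator,
      honouring the user's --alignment setting. */
   return VG_(cli_malloc) ( VG_(clo_alignment), n );
}

void SK_(free) ( void* p )
{
   VG_(cli_free) ( p );
}
#endif
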
/*------------------------------------------------------------*/
/*--- Command line options                                 ---*/
/*------------------------------------------------------------*/

/* Round malloc sizes upwards to integral number of words?
   default: NO */
Bool VG_(clo_sloppy_malloc)  = False;

/* DEBUG: print malloc details?  default: NO */
Bool VG_(clo_trace_malloc)   = False;

/* Minimum alignment in functions that don't specify alignment
   explicitly.  default: 4, the machine's natural alignment. */
Int  VG_(clo_alignment) = 4;


Bool VG_(replacement_malloc_process_cmd_line_option)(Char* arg)
{
   if (VG_CLO_STREQN(12, arg, "--alignment=")) {
      VG_(clo_alignment) = (Int)VG_(atoll)(&arg[12]);

      if (VG_(clo_alignment) < 4
          || VG_(clo_alignment) > 4096
          || VG_(log2)( VG_(clo_alignment) ) == -1 /* not a power of 2 */) {
         VG_(message)(Vg_UserMsg, "");
         VG_(message)(Vg_UserMsg,
            "Invalid --alignment= setting.  "
            "Should be a power of 2, >= 4, <= 4096.");
         VG_(bad_option)("--alignment");
      }
   }

   else if (VG_CLO_STREQ(arg, "--sloppy-malloc=yes"))
      VG_(clo_sloppy_malloc) = True;
   else if (VG_CLO_STREQ(arg, "--sloppy-malloc=no"))
      VG_(clo_sloppy_malloc) = False;

   else if (VG_CLO_STREQ(arg, "--trace-malloc=yes"))
      VG_(clo_trace_malloc) = True;
   else if (VG_CLO_STREQ(arg, "--trace-malloc=no"))
      VG_(clo_trace_malloc) = False;

   else
      return False;

   return True;
}
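
/* Example: "--alignment=16" sets VG_(clo_alignment) to 16, which is
   accepted (a power of 2 in [4..4096]), whereas "--alignment=10"
   triggers the message above and VG_(bad_option). */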

void VG_(replacement_malloc_print_usage)(void)
{
   VG_(printf)(
"    --sloppy-malloc=no|yes    round malloc sizes to next word? [no]\n"
"    --alignment=<number>      set minimum alignment of allocations [4]\n"
   );
}

void VG_(replacement_malloc_print_debug_usage)(void)
{
   VG_(printf)(
"    --trace-malloc=no|yes     show client malloc details? [no]\n"
   );
}


/*------------------------------------------------------------*/
/*--- Replacing malloc() et al                             ---*/
/*------------------------------------------------------------*/

/* Below are new versions of malloc, __builtin_new, free,
   __builtin_delete, calloc, realloc, memalign, and friends.

   malloc, __builtin_new, free, __builtin_delete, calloc and realloc
   can be entered either on the real CPU or the simulated one.  If on
   the real one, this is because the dynamic linker is running the
   static initialisers for C++, before starting up Valgrind itself.
   In this case it is safe to route calls through to
   VG_(arena_malloc)/VG_(arena_free), since they are self-initialising.

   Once Valgrind is initialised, vg_running_on_simd_CPU becomes True.
   The call then needs to be transferred from the simulated CPU back
   to the real one and routed to VG_(cli_malloc)() or VG_(cli_free)().
   To do that, the client-request mechanism (in valgrind.h) is used
   to convey requests to the scheduler.
*/

#define MALLOC_TRACE(format, args...)  \
   if (VG_(clo_trace_malloc))          \
      VG_(printf)(format, ## args )

#define MAYBE_SLOPPIFY(n)           \
   if (VG_(clo_sloppy_malloc)) {    \
      while ((n % 4) > 0) n++;      \
   }
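
/* For non-negative n, the loop in MAYBE_SLOPPIFY rounds n up to the
   next multiple of 4; it is equivalent to the constant-time idiom
      n = (n + 3) & ~3;
   e.g. 13 -> 16, 16 -> 16, 0 -> 0. */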

/* ALL calls to malloc wind up here. */
void* malloc ( Int n )
{
   void* v;

   MALLOC_TRACE("malloc[simd=%d](%d)",
                (UInt)VG_(is_running_on_simd_CPU)(), n );
   MAYBE_SLOPPIFY(n);

   if (VG_(is_running_on_simd_CPU)()) {
      v = (void*)VG_NON_SIMD_tstCALL1( SK_(malloc), n );
   } else if (VG_(clo_alignment) != 4) {
      v = VG_(arena_malloc_aligned)(VG_AR_CLIENT, VG_(clo_alignment), n);
   } else {
      v = VG_(arena_malloc)(VG_AR_CLIENT, n);
   }
   MALLOC_TRACE(" = %p\n", v );
   return v;
}

void* __builtin_new ( Int n )
{
   void* v;

   MALLOC_TRACE("__builtin_new[simd=%d](%d)",
                (UInt)VG_(is_running_on_simd_CPU)(), n );
   MAYBE_SLOPPIFY(n);

   if (VG_(is_running_on_simd_CPU)()) {
      v = (void*)VG_NON_SIMD_tstCALL1( SK_(__builtin_new), n );
   } else if (VG_(clo_alignment) != 4) {
      v = VG_(arena_malloc_aligned)(VG_AR_CLIENT, VG_(clo_alignment), n);
   } else {
      v = VG_(arena_malloc)(VG_AR_CLIENT, n);
   }
   MALLOC_TRACE(" = %p\n", v );
   return v;
}

/* gcc 3.X.X mangles it differently: _Znwj is the mangled name of
   operator new(unsigned int). */
void* _Znwj ( Int n )
{
   return __builtin_new(n);
}

void* __builtin_vec_new ( Int n )
{
   void* v;

   MALLOC_TRACE("__builtin_vec_new[simd=%d](%d)",
                (UInt)VG_(is_running_on_simd_CPU)(), n );
   MAYBE_SLOPPIFY(n);

   if (VG_(is_running_on_simd_CPU)()) {
      v = (void*)VG_NON_SIMD_tstCALL1( SK_(__builtin_vec_new), n );
   } else if (VG_(clo_alignment) != 4) {
      v = VG_(arena_malloc_aligned)(VG_AR_CLIENT, VG_(clo_alignment), n);
   } else {
      v = VG_(arena_malloc)(VG_AR_CLIENT, n);
   }
   MALLOC_TRACE(" = %p\n", v );
   return v;
}

/* gcc 3.X.X mangles it differently: _Znaj is the mangled name of
   operator new[](unsigned int). */
void* _Znaj ( Int n )
{
   return __builtin_vec_new(n);
}

void free ( void* p )
{
   MALLOC_TRACE("free[simd=%d](%p)\n",
                (UInt)VG_(is_running_on_simd_CPU)(), p );
   if (p == NULL)
      return;
   if (VG_(is_running_on_simd_CPU)()) {
      (void)VG_NON_SIMD_tstCALL1( SK_(free), p );
   } else {
      VG_(arena_free)(VG_AR_CLIENT, p);
   }
}

void __builtin_delete ( void* p )
{
   MALLOC_TRACE("__builtin_delete[simd=%d](%p)\n",
                (UInt)VG_(is_running_on_simd_CPU)(), p );
   if (p == NULL)
      return;
   if (VG_(is_running_on_simd_CPU)()) {
      (void)VG_NON_SIMD_tstCALL1( SK_(__builtin_delete), p );
   } else {
      VG_(arena_free)(VG_AR_CLIENT, p);
   }
}

/* gcc 3.X.X mangles it differently: _ZdlPv is the mangled name of
   operator delete(void*). */
void _ZdlPv ( void* p )
{
   __builtin_delete(p);
}

void __builtin_vec_delete ( void* p )
{
   MALLOC_TRACE("__builtin_vec_delete[simd=%d](%p)\n",
                (UInt)VG_(is_running_on_simd_CPU)(), p );
   if (p == NULL)
      return;
   if (VG_(is_running_on_simd_CPU)()) {
      (void)VG_NON_SIMD_tstCALL1( SK_(__builtin_vec_delete), p );
   } else {
      VG_(arena_free)(VG_AR_CLIENT, p);
   }
}

/* gcc 3.X.X mangles it differently: _ZdaPv is the mangled name of
   operator delete[](void*). */
void _ZdaPv ( void* p )
{
   __builtin_vec_delete(p);
}

void* calloc ( Int nmemb, Int size )
{
   void* v;

   MALLOC_TRACE("calloc[simd=%d](%d,%d)",
                (UInt)VG_(is_running_on_simd_CPU)(), nmemb, size );
   MAYBE_SLOPPIFY(size);

   if (VG_(is_running_on_simd_CPU)()) {
      v = (void*)VG_NON_SIMD_tstCALL2( SK_(calloc), nmemb, size );
   } else {
      v = VG_(arena_calloc)(VG_AR_CLIENT, VG_(clo_alignment), nmemb, size);
   }
   MALLOC_TRACE(" = %p\n", v );
   return v;
}


void* realloc ( void* ptrV, Int new_size )
{
   void* v;

   MALLOC_TRACE("realloc[simd=%d](%p,%d)",
                (UInt)VG_(is_running_on_simd_CPU)(), ptrV, new_size );
   MAYBE_SLOPPIFY(new_size);

   if (ptrV == NULL)
      return malloc(new_size);
   if (new_size <= 0) {
      free(ptrV);
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = 0\n" );
      return NULL;
   }
   if (VG_(is_running_on_simd_CPU)()) {
      v = (void*)VG_NON_SIMD_tstCALL2( SK_(realloc), ptrV, new_size );
   } else {
      v = VG_(arena_realloc)(VG_AR_CLIENT, ptrV, VG_(clo_alignment), new_size);
   }
   MALLOC_TRACE(" = %p\n", v );
   return v;
}


void* memalign ( Int alignment, Int n )
{
   void* v;

   MALLOC_TRACE("memalign[simd=%d](al %d, size %d)",
                (UInt)VG_(is_running_on_simd_CPU)(), alignment, n );
   MAYBE_SLOPPIFY(n);

   if (VG_(is_running_on_simd_CPU)()) {
      v = (void*)VG_NON_SIMD_tstCALL2( SK_(memalign), alignment, n );
   } else {
      v = VG_(arena_malloc_aligned)(VG_AR_CLIENT, alignment, n);
   }
   MALLOC_TRACE(" = %p\n", v );
   return v;
}


void* valloc ( Int size )
{
   return memalign(VKI_BYTES_PER_PAGE, size);
}


/* Various compatibility wrapper functions, for glibc and libstdc++. */
void cfree ( void* p )
{
   free ( p );
}


int mallopt ( int cmd, int value )
{
   /* In glibc-2.2.4, 1 denotes a successful return value for mallopt */
   return 1;
}


int __posix_memalign ( void **memptr, UInt alignment, UInt size )
{
    void *mem;

    /* Test whether the ALIGNMENT argument is valid.  It must be a
       power of two multiple of sizeof (void *).  */
    if (alignment % sizeof (void *) != 0
        || (alignment & (alignment - 1)) != 0)
       return VKI_EINVAL /*22*/ /*EINVAL*/;

    mem = memalign (alignment, size);

    if (mem != NULL) {
       *memptr = mem;
       return 0;
    }

    return VKI_ENOMEM /*12*/ /*ENOMEM*/;
}
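
/* Illustrative use of __posix_memalign: an alignment of 64 is
   accepted, being a power of two and a multiple of sizeof(void*);
   3 or 12 would be rejected with VKI_EINVAL.
      void* p;
      if (__posix_memalign(&p, 64, 1024) == 0) {
         ...use p, eventually free(p)...
      }
*/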


/* Bomb out if we get any of these. */
/* HACK: We shouldn't call VG_(core_panic) or VG_(message) on the
   simulated CPU.  Really we should pass the request in the usual way,
   and Valgrind itself can do the panic.  Too tedious, however.
*/
void pvalloc ( void )
{ VG_(core_panic)("call to pvalloc\n"); }
void malloc_stats ( void )
{ VG_(core_panic)("call to malloc_stats\n"); }
void malloc_usable_size ( void )
{ VG_(core_panic)("call to malloc_usable_size\n"); }
void malloc_trim ( void )
{ VG_(core_panic)("call to malloc_trim\n"); }
void malloc_get_state ( void )
{ VG_(core_panic)("call to malloc_get_state\n"); }
void malloc_set_state ( void )
{ VG_(core_panic)("call to malloc_set_state\n"); }


/* Yet another ugly hack.  Cannot include <malloc.h> because we
   implement functions implemented there with different signatures.
   This struct definition MUST match the system one. */

/* SVID2/XPG mallinfo structure */
struct mallinfo {
   int arena;    /* total space allocated from system */
   int ordblks;  /* number of non-inuse chunks */
   int smblks;   /* unused -- always zero */
   int hblks;    /* number of mmapped regions */
   int hblkhd;   /* total space in mmapped regions */
   int usmblks;  /* unused -- always zero */
   int fsmblks;  /* unused -- always zero */
   int uordblks; /* total allocated space */
   int fordblks; /* total non-inuse space */
   int keepcost; /* top-most, releasable (via malloc_trim) space */
};

struct mallinfo mallinfo ( void )
{
   /* Should really try to return something a bit more meaningful */
   Int             i;
   struct mallinfo mi;
   UChar* pmi = (UChar*)(&mi);
   for (i = 0; i < sizeof(mi); i++)
      pmi[i] = 0;
   return mi;
}

/*--------------------------------------------------------------------*/
/*--- end                                      vg_replace_malloc.c ---*/
/*--------------------------------------------------------------------*/