
/*--------------------------------------------------------------------*/
/*--- Replacements for malloc() et al, which run on the simulated  ---*/
/*--- CPU.                                     vg_replace_malloc.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, an extensible x86 protected-mode
   emulator for monitoring program execution on x86-Unixes.

   Copyright (C) 2000-2003 Julian Seward
   jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

/* ---------------------------------------------------------------------
   All the code in this file runs on the SIMULATED CPU.  The functions
   here are intended, for various reasons, as drop-in replacements for
   malloc() and friends.  They have global visibility (obviously) and
   no prototypes in vg_include.h, since they are not intended to be
   called from within Valgrind.

   This file can be #included into a skin that wishes to know about
   calls to malloc().  The skin should define the functions SK_(malloc)
   et al that will be called.
   ------------------------------------------------------------------ */
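
/* Purely illustrative: a minimal, hypothetical skin-side definition of
   SK_(malloc), not part of this file.  The sketch assumes VG_(cli_malloc)
   takes (alignment, size), as suggested by the comments further down;
   real skins such as memcheck do extra bookkeeping around the
   allocation as well.

      void* SK_(malloc) ( Int n )
      {
         return VG_(cli_malloc) ( VG_(clo_alignment), n );
      }
*/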

#include "vg_include.h"

/* Sidestep the normal check which disallows using valgrind.h directly. */
#define __VALGRIND_SOMESKIN_H
#include "valgrind.h"            /* for VALGRIND_NON_SIMD_tstCALL[12] */

/*------------------------------------------------------------*/
/*--- Command line options                                 ---*/
/*------------------------------------------------------------*/

/* Round malloc sizes upwards to integral number of words?  default: NO */
Bool VG_(clo_sloppy_malloc)  = False;

/* DEBUG: print malloc details?  default: NO */
Bool VG_(clo_trace_malloc)   = False;

/* Minimum alignment in functions that don't specify alignment
   explicitly.  default: 4, the machine's natural word alignment. */
Int  VG_(clo_alignment) = 4;


Bool VG_(replacement_malloc_process_cmd_line_option)(Char* arg)
{
   if (VG_CLO_STREQN(12, arg, "--alignment=")) {
      VG_(clo_alignment) = (Int)VG_(atoll)(&arg[12]);

      if (VG_(clo_alignment) < 4
          || VG_(clo_alignment) > 4096
          || VG_(log2)( VG_(clo_alignment) ) == -1 /* not a power of 2 */) {
         VG_(message)(Vg_UserMsg, "");
         VG_(message)(Vg_UserMsg,
            "Invalid --alignment= setting.  "
            "Should be a power of 2, >= 4, <= 4096.");
         VG_(bad_option)("--alignment");
      }
   }

   else if (VG_CLO_STREQ(arg, "--sloppy-malloc=yes"))
      VG_(clo_sloppy_malloc) = True;
   else if (VG_CLO_STREQ(arg, "--sloppy-malloc=no"))
      VG_(clo_sloppy_malloc) = False;

   else if (VG_CLO_STREQ(arg, "--trace-malloc=yes"))
      VG_(clo_trace_malloc) = True;
   else if (VG_CLO_STREQ(arg, "--trace-malloc=no"))
      VG_(clo_trace_malloc) = False;

   else
      return False;

   return True;
}

void VG_(replacement_malloc_print_usage)(void)
{
   VG_(printf)(
"    --sloppy-malloc=no|yes    round malloc sizes to next word? [no]\n"
"    --alignment=<number>      set minimum alignment of allocations [4]\n"
   );
}

void VG_(replacement_malloc_print_debug_usage)(void)
{
   VG_(printf)(
"    --trace-malloc=no|yes     show client malloc details? [no]\n"
   );
}
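
/* Example invocation (illustrative): the options above are consumed
   by the core, e.g.

      valgrind --alignment=16 --sloppy-malloc=yes --trace-malloc=yes ./a.out
*/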


/*------------------------------------------------------------*/
/*--- Replacing malloc() et al                             ---*/
/*------------------------------------------------------------*/

/* Below are new versions of malloc, __builtin_new, free,
   __builtin_delete, calloc, realloc, memalign, and friends.

   malloc, __builtin_new, free, __builtin_delete, calloc and realloc
   can be entered either on the real CPU or the simulated one.  If on
   the real one, this is because the dynamic linker is running the
   static initialisers for C++, before starting up Valgrind itself.
   In this case it is safe to route calls through to
   VG_(arena_malloc)/VG_(arena_free), since they are self-initialising.

   Once Valgrind is initialised, vg_running_on_simd_CPU becomes True.
   The call then needs to be transferred from the simulated CPU back
   to the real one and routed to VG_(cli_malloc)() or VG_(cli_free)().
   To do that, the client-request mechanism (in valgrind.h) is used to
   convey requests to the scheduler.
*/
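
/* For orientation: every replacement below follows the same dispatch
   shape, sketched here for malloc (abbreviated; the real code also
   honours --alignment):

      if (VG_(is_running_on_simd_CPU)())
         v = (void*)VALGRIND_NON_SIMD_tstCALL1( SK_(malloc), n );
      else
         v = VG_(arena_malloc)(VG_AR_CLIENT, n);

   The first arm ships the call from the simulated CPU back to the
   real one via a client request; the second arm runs only before
   Valgrind has finished starting up. */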

/* Print a trace message if --trace-malloc=yes. */
#define MALLOC_TRACE(format, args...) \
   if (VG_(clo_trace_malloc))         \
      VG_(printf)(format, ## args )

/* If --sloppy-malloc=yes, round the request size up to a whole
   number of words. */
#define MAYBE_SLOPPIFY(n) \
   if (VG_(clo_sloppy_malloc)) { \
      while ((n % 4) > 0) n++; \
   }

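/* Note: the MAYBE_SLOPPIFY loop above simply rounds n up to the next
   multiple of 4; an equivalent branch-free form would be

      n = (n + 3) & ~3;

   The loop form is retained above to keep the behaviour obvious. */
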
/* ALL calls to malloc wind up here. */
void* malloc ( Int n )
{
   void* v;

   MALLOC_TRACE("malloc[simd=%d](%d)",
                (UInt)VG_(is_running_on_simd_CPU)(), n );
   MAYBE_SLOPPIFY(n);

   if (VG_(is_running_on_simd_CPU)()) {
      v = (void*)VALGRIND_NON_SIMD_tstCALL1( SK_(malloc), n );
   } else if (VG_(clo_alignment) != 4) {
      v = VG_(arena_malloc_aligned)(VG_AR_CLIENT, VG_(clo_alignment), n);
   } else {
      v = VG_(arena_malloc)(VG_AR_CLIENT, n);
   }
   MALLOC_TRACE(" = %p\n", v );
   return v;
}

void* __builtin_new ( Int n )
{
   void* v;

   MALLOC_TRACE("__builtin_new[simd=%d](%d)",
                (UInt)VG_(is_running_on_simd_CPU)(), n );
   MAYBE_SLOPPIFY(n);

   if (VG_(is_running_on_simd_CPU)()) {
      v = (void*)VALGRIND_NON_SIMD_tstCALL1( SK_(__builtin_new), n );
   } else if (VG_(clo_alignment) != 4) {
      v = VG_(arena_malloc_aligned)(VG_AR_CLIENT, VG_(clo_alignment), n);
   } else {
      v = VG_(arena_malloc)(VG_AR_CLIENT, n);
   }
   MALLOC_TRACE(" = %p\n", v );
   return v;
}

/* gcc 3.x mangles the name differently: _Znwj is
   "operator new(unsigned int)". */
void* _Znwj ( Int n )
{
   return __builtin_new(n);
}

void* __builtin_vec_new ( Int n )
{
   void* v;

   MALLOC_TRACE("__builtin_vec_new[simd=%d](%d)",
                (UInt)VG_(is_running_on_simd_CPU)(), n );
   MAYBE_SLOPPIFY(n);

   if (VG_(is_running_on_simd_CPU)()) {
      v = (void*)VALGRIND_NON_SIMD_tstCALL1( SK_(__builtin_vec_new), n );
   } else if (VG_(clo_alignment) != 4) {
      v = VG_(arena_malloc_aligned)(VG_AR_CLIENT, VG_(clo_alignment), n);
   } else {
      v = VG_(arena_malloc)(VG_AR_CLIENT, n);
   }
   MALLOC_TRACE(" = %p\n", v );
   return v;
}

/* gcc 3.x mangles the name differently: _Znaj is
   "operator new[](unsigned int)". */
void* _Znaj ( Int n )
{
   return __builtin_vec_new(n);
}

void free ( void* p )
{
   MALLOC_TRACE("free[simd=%d](%p)\n",
                (UInt)VG_(is_running_on_simd_CPU)(), p );
   if (p == NULL)
      return;
   if (VG_(is_running_on_simd_CPU)()) {
      (void)VALGRIND_NON_SIMD_tstCALL1( SK_(free), p );
   } else {
      VG_(arena_free)(VG_AR_CLIENT, p);
   }
}

void __builtin_delete ( void* p )
{
   MALLOC_TRACE("__builtin_delete[simd=%d](%p)\n",
                (UInt)VG_(is_running_on_simd_CPU)(), p );
   if (p == NULL)
      return;
   if (VG_(is_running_on_simd_CPU)()) {
      (void)VALGRIND_NON_SIMD_tstCALL1( SK_(__builtin_delete), p );
   } else {
      VG_(arena_free)(VG_AR_CLIENT, p);
   }
}

/* gcc 3.x mangles the name differently: _ZdlPv is
   "operator delete(void*)". */
void _ZdlPv ( void* p )
{
   __builtin_delete(p);
}

void __builtin_vec_delete ( void* p )
{
   MALLOC_TRACE("__builtin_vec_delete[simd=%d](%p)\n",
                (UInt)VG_(is_running_on_simd_CPU)(), p );
   if (p == NULL)
      return;
   if (VG_(is_running_on_simd_CPU)()) {
      (void)VALGRIND_NON_SIMD_tstCALL1( SK_(__builtin_vec_delete), p );
   } else {
      VG_(arena_free)(VG_AR_CLIENT, p);
   }
}

/* gcc 3.x mangles the name differently: _ZdaPv is
   "operator delete[](void*)". */
void _ZdaPv ( void* p )
{
   __builtin_vec_delete(p);
}

void* calloc ( Int nmemb, Int size )
{
   void* v;

   MALLOC_TRACE("calloc[simd=%d](%d,%d)",
                (UInt)VG_(is_running_on_simd_CPU)(), nmemb, size );
   MAYBE_SLOPPIFY(size);

   if (VG_(is_running_on_simd_CPU)()) {
      v = (void*)VALGRIND_NON_SIMD_tstCALL2( SK_(calloc), nmemb, size );
   } else {
      v = VG_(arena_calloc)(VG_AR_CLIENT, VG_(clo_alignment), nmemb, size);
   }
   MALLOC_TRACE(" = %p\n", v );
   return v;
}


void* realloc ( void* ptrV, Int new_size )
{
   void* v;

   MALLOC_TRACE("realloc[simd=%d](%p,%d)",
                (UInt)VG_(is_running_on_simd_CPU)(), ptrV, new_size );
   MAYBE_SLOPPIFY(new_size);

   if (ptrV == NULL)
      return malloc(new_size);
   /* glibc-style semantics: realloc with a size of zero frees the
      block and returns NULL. */
   if (new_size <= 0) {
      free(ptrV);
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = 0\n" );
      return NULL;
   }
   if (VG_(is_running_on_simd_CPU)()) {
      v = (void*)VALGRIND_NON_SIMD_tstCALL2( SK_(realloc), ptrV, new_size );
   } else {
      v = VG_(arena_realloc)(VG_AR_CLIENT, ptrV, VG_(clo_alignment), new_size);
   }
   MALLOC_TRACE(" = %p\n", v );
   return v;
}


void* memalign ( Int alignment, Int n )
{
   void* v;

   MALLOC_TRACE("memalign[simd=%d](al %d, size %d)",
                (UInt)VG_(is_running_on_simd_CPU)(), alignment, n );
   MAYBE_SLOPPIFY(n);

   if (VG_(is_running_on_simd_CPU)()) {
      v = (void*)VALGRIND_NON_SIMD_tstCALL2( SK_(memalign), alignment, n );
   } else {
      v = VG_(arena_malloc_aligned)(VG_AR_CLIENT, alignment, n);
   }
   MALLOC_TRACE(" = %p\n", v );
   return v;
}


/* Allocate SIZE bytes aligned on a page boundary. */
void* valloc ( Int size )
{
   return memalign(VKI_BYTES_PER_PAGE, size);
}


/* Various compatibility wrapper functions, for glibc and libstdc++. */
void cfree ( void* p )
{
   free ( p );
}


int mallopt ( int cmd, int value )
{
   /* In glibc-2.2.4, 1 denotes a successful return value for mallopt */
   return 1;
}


int __posix_memalign ( void **memptr, UInt alignment, UInt size )
{
   void *mem;

   /* Test whether the ALIGNMENT argument is valid.  It must be a
      power of two multiple of sizeof (void *). */
   if (alignment % sizeof (void *) != 0
       || (alignment & (alignment - 1)) != 0)
      return VKI_EINVAL /*22*/ /*EINVAL*/;

   mem = memalign (alignment, size);

   if (mem != NULL) {
      *memptr = mem;
      return 0;
   }

   return VKI_ENOMEM /*12*/ /*ENOMEM*/;
}
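
/* Illustrative use (hypothetical caller, not part of this file):

      void* p;
      int r = __posix_memalign(&p, 16, 100);
      // r == 0 on success, with p aligned to a 16-byte boundary

   glibc normally exposes this entry point under the name
   posix_memalign. */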

Int malloc_usable_size ( void* p )
{
   Int pszB;

   MALLOC_TRACE("malloc_usable_size[simd=%d](%p)",
                (UInt)VG_(is_running_on_simd_CPU)(), p );
   if (NULL == p)
      return 0;

   if (VG_(is_running_on_simd_CPU)()) {
      pszB = (Int)VALGRIND_NON_SIMD_CALL2( VG_(arena_payload_szB),
                                           VG_AR_CLIENT, p );
   } else {
      pszB = VG_(arena_payload_szB)(VG_AR_CLIENT, p);
   }
   MALLOC_TRACE(" = %d\n", pszB );

   return pszB;
}


/* Bomb out if we get any of these. */
/* HACK: We shouldn't call VG_(core_panic) or VG_(message) on the
   simulated CPU.  Really we should pass the request in the usual way,
   and Valgrind itself can do the panic.  Too tedious, however.
*/
void pvalloc ( void )
{ VG_(core_panic)("call to pvalloc\n"); }
void malloc_stats ( void )
{ VG_(core_panic)("call to malloc_stats\n"); }

void malloc_trim ( void )
{ VG_(core_panic)("call to malloc_trim\n"); }
void malloc_get_state ( void )
{ VG_(core_panic)("call to malloc_get_state\n"); }
void malloc_set_state ( void )
{ VG_(core_panic)("call to malloc_set_state\n"); }


/* Yet another ugly hack.  We cannot include <malloc.h> because we
   implement functions declared there with different signatures.
   This struct definition MUST match the system one. */

/* SVID2/XPG mallinfo structure */
struct mallinfo {
   int arena;    /* total space allocated from system */
   int ordblks;  /* number of non-inuse chunks */
   int smblks;   /* unused -- always zero */
   int hblks;    /* number of mmapped regions */
   int hblkhd;   /* total space in mmapped regions */
   int usmblks;  /* unused -- always zero */
   int fsmblks;  /* unused -- always zero */
   int uordblks; /* total allocated space */
   int fordblks; /* total non-inuse space */
   int keepcost; /* top-most, releasable (via malloc_trim) space */
};

struct mallinfo mallinfo ( void )
{
   /* Should really try to return something a bit more meaningful */
   UInt            i;
   struct mallinfo mi;
   UChar*          pmi = (UChar*)(&mi);
   /* Zero the struct byte by byte, without calling memset. */
   for (i = 0; i < sizeof(mi); i++)
      pmi[i] = 0;
   return mi;
}

/*--------------------------------------------------------------------*/
/*--- end                                      vg_replace_malloc.c ---*/
/*--------------------------------------------------------------------*/