blob: d2be752d093a13f839980795dad67475844c5d45 [file] [log] [blame]
sewardjde4a1d02002-03-22 01:27:54 +00001
2/*--------------------------------------------------------------------*/
3/*--- An implementation of malloc/free for the client. ---*/
4/*--- vg_clientmalloc.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
8 This file is part of Valgrind, an x86 protected-mode emulator
9 designed for debugging and profiling binaries on x86-Unixes.
10
11 Copyright (C) 2000-2002 Julian Seward
12 jseward@acm.org
13 Julian_Seward@muraroa.demon.co.uk
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file LICENSE.
31*/
32
33#include "vg_include.h"
34
35
36/*------------------------------------------------------------*/
37/*--- Defns ---*/
38/*------------------------------------------------------------*/
39
40/* #define DEBUG_CLIENTMALLOC */
41
/* Holds malloc'd but not freed blocks. */
/* Hash a client address into one of VG_N_MALLOCLISTS buckets. */
#define VG_MALLOCLIST_NO(aa) (((UInt)(aa)) % VG_N_MALLOCLISTS)
static ShadowChunk* vg_malloclist[VG_N_MALLOCLISTS];
static Bool vg_client_malloc_init_done = False;

/* Holds blocks after freeing.  A FIFO queue: chunks are appended at
   vg_freed_list_end and are really released from vg_freed_list_start
   once the total queued volume exceeds VG_(clo_freelist_vol)
   (see add_to_freed_queue). */
static ShadowChunk* vg_freed_list_start = NULL;
static ShadowChunk* vg_freed_list_end = NULL;
static Int vg_freed_list_volume = 0;  /* sum of sizes of queued chunks */

/* Stats ... */
static UInt vg_cmalloc_n_mallocs = 0;   /* # allocation requests */
static UInt vg_cmalloc_n_frees = 0;     /* # deallocation requests */
static UInt vg_cmalloc_bs_mallocd = 0;  /* total bytes requested */

/* Free-search stats: lookups done vs list nodes examined
   (maintained in VG_(client_free)). */
static UInt vg_mlist_frees = 0;
static UInt vg_mlist_tries = 0;
59
60
61/*------------------------------------------------------------*/
62/*--- Fns ---*/
63/*------------------------------------------------------------*/
64
65/* Allocate a suitably-sized array, copy all the malloc-d block
66 shadows into it, and return both the array and the size of it.
67 This is used by the memory-leak detector.
68*/
69ShadowChunk** VG_(get_malloc_shadows) ( /*OUT*/ UInt* n_shadows )
70{
71 UInt i, scn;
72 ShadowChunk** arr;
73 ShadowChunk* sc;
74 *n_shadows = 0;
75 for (scn = 0; scn < VG_N_MALLOCLISTS; scn++) {
76 for (sc = vg_malloclist[scn]; sc != NULL; sc = sc->next) {
77 (*n_shadows)++;
78 }
79 }
80 if (*n_shadows == 0) return NULL;
81
82 arr = VG_(malloc)( VG_AR_PRIVATE,
83 *n_shadows * sizeof(ShadowChunk*) );
84
85 i = 0;
86 for (scn = 0; scn < VG_N_MALLOCLISTS; scn++) {
87 for (sc = vg_malloclist[scn]; sc != NULL; sc = sc->next) {
88 arr[i++] = sc;
89 }
90 }
91 vg_assert(i == *n_shadows);
92 return arr;
93}
94
95static void client_malloc_init ( void )
96{
97 UInt ml_no;
98 if (vg_client_malloc_init_done) return;
99 for (ml_no = 0; ml_no < VG_N_MALLOCLISTS; ml_no++)
100 vg_malloclist[ml_no] = NULL;
101 vg_client_malloc_init_done = True;
102}
103
104
105static __attribute__ ((unused))
106 Int count_freelist ( void )
107{
108 ShadowChunk* sc;
109 Int n = 0;
110 for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
111 n++;
112 return n;
113}
114
115static __attribute__ ((unused))
116 Int count_malloclists ( void )
117{
118 ShadowChunk* sc;
119 UInt ml_no;
120 Int n = 0;
121 for (ml_no = 0; ml_no < VG_N_MALLOCLISTS; ml_no++)
122 for (sc = vg_malloclist[ml_no]; sc != NULL; sc = sc->next)
123 n++;
124 return n;
125}
126
127static __attribute__ ((unused))
128 void freelist_sanity ( void )
129{
130 ShadowChunk* sc;
131 Int n = 0;
132 /* VG_(printf)("freelist sanity\n"); */
133 for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
134 n += sc->size;
135 vg_assert(n == vg_freed_list_volume);
136}
137
/* Remove sc from malloc list # ml_no.  It is an unchecked error for
   sc not to be present in the list.
*/
141static void remove_from_malloclist ( UInt ml_no, ShadowChunk* sc )
142{
143 ShadowChunk *sc1, *sc2;
144 if (sc == vg_malloclist[ml_no]) {
145 vg_malloclist[ml_no] = vg_malloclist[ml_no]->next;
146 } else {
147 sc1 = vg_malloclist[ml_no];
148 vg_assert(sc1 != NULL);
149 sc2 = sc1->next;
150 while (sc2 != sc) {
151 vg_assert(sc2 != NULL);
152 sc1 = sc2;
153 sc2 = sc2->next;
154 }
155 vg_assert(sc1->next == sc);
156 vg_assert(sc2 == sc);
157 sc1->next = sc2->next;
158 }
159}
160
161
/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time. */

static void add_to_freed_queue ( ShadowChunk* sc )
{
   ShadowChunk* sc1;

   /* Put it at the end of the freed list */
   if (vg_freed_list_end == NULL) {
      /* Queue is empty: sc becomes both head and tail. */
      vg_assert(vg_freed_list_start == NULL);
      vg_freed_list_end = vg_freed_list_start = sc;
      vg_freed_list_volume = sc->size;
   } else {
      vg_assert(vg_freed_list_end->next == NULL);
      vg_freed_list_end->next = sc;
      vg_freed_list_end = sc;
      vg_freed_list_volume += sc->size;
   }
   sc->next = NULL;

   /* Release enough of the oldest blocks to bring the free queue
      volume below vg_clo_freelist_vol. */

   while (vg_freed_list_volume > VG_(clo_freelist_vol)) {
      /* freelist_sanity(); */
      vg_assert(vg_freed_list_start != NULL);
      vg_assert(vg_freed_list_end != NULL);

      /* Pop the oldest chunk off the front of the queue. */
      sc1 = vg_freed_list_start;
      vg_freed_list_volume -= sc1->size;
      /* VG_(printf)("volume now %d\n", vg_freed_list_volume); */
      vg_assert(vg_freed_list_volume >= 0);

      if (vg_freed_list_start == vg_freed_list_end) {
         /* That was the last chunk: queue becomes empty. */
         vg_freed_list_start = vg_freed_list_end = NULL;
      } else {
         vg_freed_list_start = sc1->next;
      }
      sc1->next = NULL; /* just paranoia */

      /* Really release both the client block and its shadow. */
      VG_(free)(VG_AR_CLIENT, (void*)(sc1->data));
      VG_(free)(VG_AR_PRIVATE, sc1);
   }
}
205
206
/* Allocate a user-chunk of size bytes. Also allocate its shadow
   block, make the shadow block point at the user block. Put the
   shadow chunk on the appropriate list, and set all memory
   protections correctly.

   align == 0 means "no particular alignment requested".
   Returns the new shadow chunk; its ->data field is the address
   handed back to the client. */

static ShadowChunk* client_malloc_shadow ( UInt align, UInt size,
                                           VgAllocKind kind )
{
   ShadowChunk* sc;
   Addr p;
   UInt ml_no;

#  ifdef DEBUG_CLIENTMALLOC
   VG_(printf)("[m %d, f %d (%d)] client_malloc_shadow ( al %d, sz %d )\n",
               count_malloclists(),
               count_freelist(), vg_freed_list_volume,
               align, size );
#  endif

   if (align == 0)
      p = (Addr)VG_(malloc)(VG_AR_CLIENT, size);
   else
      p = (Addr)VG_(malloc_aligned)(VG_AR_CLIENT, align, size);

   /* Build the shadow chunk (records who allocated, how big, and of
      what kind) and push it on the front of its hash list. */
   sc = VG_(malloc)(VG_AR_PRIVATE, sizeof(ShadowChunk));
   sc->where = VG_(get_ExeContext)(True);
   sc->size = size;
   sc->allockind = kind;
   sc->data = p;
   ml_no = VG_MALLOCLIST_NO(p);
   sc->next = vg_malloclist[ml_no];
   vg_malloclist[ml_no] = sc;

   /* The block itself starts writable-but-undefined, bracketed by
      inaccessible red zones on both sides. */
   VGM_(make_writable)(p, size);
   VGM_(make_noaccess)(p + size,
                       VG_AR_CLIENT_REDZONE_SZB);
   VGM_(make_noaccess)(p - VG_AR_CLIENT_REDZONE_SZB,
                       VG_AR_CLIENT_REDZONE_SZB);

   return sc;
}
248
249
250/* Allocate memory, noticing whether or not we are doing the full
251 instrumentation thing. */
252
/* Handle an allocation request arriving from the client.
   raw_alloc_kind is the magic number pushed by the wrappers below:
   0x4000 malloc, 0x4001 new, 0x4002 new[], 6666 calloc.
   When not instrumenting, just hands out memory with no shadow. */
void* VG_(client_malloc) ( UInt size, UInt raw_alloc_kind )
{
   ShadowChunk* sc;
   VgAllocKind kind;

   VGP_PUSHCC(VgpCliMalloc);
   client_malloc_init();
#  ifdef DEBUG_CLIENTMALLOC
   VG_(printf)("[m %d, f %d (%d)] client_malloc ( %d, %x )\n",
               count_malloclists(),
               count_freelist(), vg_freed_list_volume,
               size, raw_alloc_kind );
#  endif
   if (!VG_(clo_instrument)) {
      VGP_POPCC;
      return VG_(malloc) ( VG_AR_CLIENT, size );
   }
   /* Map the wrapper's magic number to an allocation kind, so that
      mismatched frees (e.g. delete of a malloc'd block) can be
      reported later by VG_(client_free). */
   switch (raw_alloc_kind) {
      case 0x4002: kind = Vg_AllocNewVec; break;
      case 0x4001: kind = Vg_AllocNew; break;
      case 0x4000: /* malloc */
      case 6666: /* calloc */
         kind = Vg_AllocMalloc; break;
      default: /* should not happen */
         /* therefore we make sure it doesn't -- JRS */
         VG_(panic)("VG_(client_malloc): raw_alloc_kind");
         break; /*NOTREACHED*/
   }
   sc = client_malloc_shadow ( 0, size, kind );
   VGP_POPCC;
   return (void*)(sc->data);
}
285
286
/* memalign() for the client: like VG_(client_malloc) but with an
   explicit alignment; always recorded as kind Vg_AllocMalloc. */
void* VG_(client_memalign) ( UInt align, UInt size )
{
   ShadowChunk* sc;
   VGP_PUSHCC(VgpCliMalloc);
   client_malloc_init();
#  ifdef DEBUG_CLIENTMALLOC
   VG_(printf)("[m %d, f %d (%d)] client_memalign ( al %d, sz %d )\n",
               count_malloclists(),
               count_freelist(), vg_freed_list_volume,
               align, size );
#  endif
   /* Not instrumenting: plain aligned allocation, no shadow chunk. */
   if (!VG_(clo_instrument)) {
      VGP_POPCC;
      return VG_(malloc_aligned) ( VG_AR_CLIENT, align, size );
   }
   sc = client_malloc_shadow ( align, size, Vg_AllocMalloc );
   VGP_POPCC;
   return (void*)(sc->data);
}
306
307
/* Handle a deallocation request from the client when instrumenting.
   raw_alloc_kind is the wrapper's magic number: 0x5000 free,
   0x5001 delete, 0x5002 delete[].  Freed blocks are not released
   immediately; they are queued so use-after-free can be detected. */
void VG_(client_free) ( void* ptrV, UInt raw_alloc_kind )
{
   ShadowChunk* sc;
   UInt ml_no;
   VgAllocKind kind;

   VGP_PUSHCC(VgpCliMalloc);
   client_malloc_init();
#  ifdef DEBUG_CLIENTMALLOC
   VG_(printf)("[m %d, f %d (%d)] client_free ( %p, %x )\n",
               count_malloclists(),
               count_freelist(), vg_freed_list_volume,
               ptrV, raw_alloc_kind );
#  endif
   /* Not instrumenting: really free it, no bookkeeping. */
   if (!VG_(clo_instrument)) {
      VGP_POPCC;
      VG_(free) ( VG_AR_CLIENT, ptrV );
      return;
   }

   /* first, see if ptrV is one vg_client_malloc gave out. */
   ml_no = VG_MALLOCLIST_NO(ptrV);
   vg_mlist_frees++;
   for (sc = vg_malloclist[ml_no]; sc != NULL; sc = sc->next) {
      vg_mlist_tries++;
      if ((Addr)ptrV == sc->data)
         break;
   }

   /* Unknown pointer: report a bad free and bail out. */
   if (sc == NULL) {
      VG_(record_free_error) ( (Addr)ptrV );
      VGP_POPCC;
      return;
   }

   /* Map the wrapper's magic number to the deallocation kind;
      anything unexpected is treated as a plain free(). */
   switch (raw_alloc_kind) {
      case 0x5002: kind = Vg_AllocNewVec; break;
      case 0x5001: kind = Vg_AllocNew; break;
      case 0x5000:
      default:
         kind = Vg_AllocMalloc;
         /* should only happen if bug in client code */
         break;
   }

   /* check if it's a matching free() / delete / delete [] */
   if (kind != sc->allockind)
      VG_(record_freemismatch_error) ( (Addr) ptrV );

   /* Remove the shadow chunk from the mallocd list. */
   remove_from_malloclist ( ml_no, sc );

   /* Declare it inaccessible (the block plus both red zones), and
      record the free site: VG_(describe_addr) reports it as the
      block's last change. */
   VGM_(make_noaccess) ( sc->data - VG_AR_CLIENT_REDZONE_SZB,
                         sc->size + 2*VG_AR_CLIENT_REDZONE_SZB );
   VGM_(make_noaccess) ( (Addr)sc, sizeof(ShadowChunk) );
   sc->where = VG_(get_ExeContext)(True);

   /* Put it out of harm's way for a while. */
   add_to_freed_queue ( sc );
   VGP_POPCC;
}
370
371
372
373void* VG_(client_calloc) ( UInt nmemb, UInt size1 )
374{
375 ShadowChunk* sc;
376 Addr p;
377 UInt size, i, ml_no;
378
379 VGP_PUSHCC(VgpCliMalloc);
380 client_malloc_init();
381
382# ifdef DEBUG_CLIENTMALLOC
383 VG_(printf)("[m %d, f %d (%d)] client_calloc ( %d, %d )\n",
384 count_malloclists(),
385 count_freelist(), vg_freed_list_volume,
386 nmemb, size1 );
387# endif
388
389 if (!VG_(clo_instrument)) {
390 VGP_POPCC;
391 return VG_(calloc) ( VG_AR_CLIENT, nmemb, size1 );
392 }
393
394 size = nmemb * size1;
395 p = (Addr)VG_(malloc)(VG_AR_CLIENT, size);
396 sc = VG_(malloc)(VG_AR_PRIVATE, sizeof(ShadowChunk));
397 sc->where = VG_(get_ExeContext)(True);
398 sc->size = size;
399 sc->allockind = Vg_AllocMalloc; /* its a lie - but true. eat this :) */
400 sc->data = p;
401 ml_no = VG_MALLOCLIST_NO(p);
402 sc->next = vg_malloclist[ml_no];
403 vg_malloclist[ml_no] = sc;
404
405 VGM_(make_readable)(p, size);
406 VGM_(make_noaccess)(p + size,
407 VG_AR_CLIENT_REDZONE_SZB);
408 VGM_(make_noaccess)(p - VG_AR_CLIENT_REDZONE_SZB,
409 VG_AR_CLIENT_REDZONE_SZB);
410
411 for (i = 0; i < size; i++) ((UChar*)p)[i] = 0;
412
413 VGP_POPCC;
414 return (void*)p;
415}
416
417
/* realloc() for the client when instrumenting.  Shrinking and
   size-unchanged requests keep the block in place; growing ones
   allocate-copy-retire.  realloc(NULL,n) and realloc(p,0) never
   reach here: the realloc wrapper below routes them to malloc/free
   first. */
void* VG_(client_realloc) ( void* ptrV, UInt size_new )
{
   ShadowChunk *sc, *sc_new;
   UInt i, ml_no;

   VGP_PUSHCC(VgpCliMalloc);
   client_malloc_init();

#  ifdef DEBUG_CLIENTMALLOC
   VG_(printf)("[m %d, f %d (%d)] client_realloc ( %p, %d )\n",
               count_malloclists(),
               count_freelist(), vg_freed_list_volume,
               ptrV, size_new );
#  endif

   if (!VG_(clo_instrument)) {
      /* The wrapper guarantees the NULL / zero-size cases were
         already peeled off. */
      vg_assert(ptrV != NULL && size_new != 0);
      VGP_POPCC;
      return VG_(realloc) ( VG_AR_CLIENT, ptrV, size_new );
   }

   /* First try and find the block. */
   ml_no = VG_MALLOCLIST_NO(ptrV);
   for (sc = vg_malloclist[ml_no]; sc != NULL; sc = sc->next) {
      if ((Addr)ptrV == sc->data)
         break;
   }

   /* Unknown pointer: report a bad free and give up. */
   if (sc == NULL) {
      VG_(record_free_error) ( (Addr)ptrV );
      /* Perhaps we should keep going regardless. */
      VGP_POPCC;
      return NULL;
   }

   if (sc->allockind != Vg_AllocMalloc) {
      /* can not realloc a range that was allocated with new or new [] */
      VG_(record_freemismatch_error) ( (Addr)ptrV );
      /* but keep going anyway */
   }

   if (sc->size == size_new) {
      /* size unchanged */
      VGP_POPCC;
      return ptrV;
   }
   if (sc->size > size_new) {
      /* new size is smaller: keep the block, poison the cut tail */
      VGM_(make_noaccess)( sc->data + size_new,
                           sc->size - size_new );
      sc->size = size_new;
      VGP_POPCC;
      return ptrV;
   } else {
      /* new size is bigger: allocate a fresh block, copy the old
         contents and their address-range permissions, then retire
         the old block exactly as VG_(client_free) would. */
      sc_new = client_malloc_shadow ( 0, size_new, Vg_AllocMalloc );
      for (i = 0; i < sc->size; i++)
         ((UChar*)(sc_new->data))[i] = ((UChar*)(sc->data))[i];
      VGM_(copy_address_range_perms) (
         sc->data, sc_new->data, sc->size );
      remove_from_malloclist ( VG_MALLOCLIST_NO(sc->data), sc );
      VGM_(make_noaccess) ( sc->data - VG_AR_CLIENT_REDZONE_SZB,
                            sc->size + 2*VG_AR_CLIENT_REDZONE_SZB );
      VGM_(make_noaccess) ( (Addr)sc, sizeof(ShadowChunk) );
      add_to_freed_queue ( sc );
      VGP_POPCC;
      return (void*)sc_new->data;
   }
}
487
488
/* Called at exit: print a summary of heap usage (blocks/bytes never
   freed by the client, total alloc/free counts) and, unless
   --leak-check is on, a hint about running the leak checker. */
void VG_(clientmalloc_done) ( void )
{
   UInt nblocks, nbytes, ml_no;
   ShadowChunk* sc;

   client_malloc_init();

   /* Tot up the blocks and bytes still on the malloc lists, i.e.
      allocated but never freed. */
   nblocks = nbytes = 0;

   for (ml_no = 0; ml_no < VG_N_MALLOCLISTS; ml_no++) {
      for (sc = vg_malloclist[ml_no]; sc != NULL; sc = sc->next) {
         nblocks ++;
         nbytes += sc->size;
      }
   }

   /* Completely silent at verbosity 0. */
   if (VG_(clo_verbosity) == 0)
     return;

   VG_(message)(Vg_UserMsg,
                "malloc/free: in use at exit: %d bytes in %d blocks.",
                nbytes, nblocks);
   VG_(message)(Vg_UserMsg,
                "malloc/free: %d allocs, %d frees, %d bytes allocated.",
                vg_cmalloc_n_mallocs,
                vg_cmalloc_n_frees, vg_cmalloc_bs_mallocd);
   if (!VG_(clo_leak_check))
      VG_(message)(Vg_UserMsg,
         "For a detailed leak analysis, rerun with: --leak-check=yes");
   /* Dead debug output; flip the 0 to re-enable. */
   if (0)
      VG_(message)(Vg_DebugMsg,
         "free search: %d tries, %d frees",
         vg_mlist_tries,
         vg_mlist_frees );
   if (VG_(clo_verbosity) > 1)
      VG_(message)(Vg_UserMsg, "");
}
526
527
528/* Describe an address as best you can, for error messages,
529 putting the result in ai. */
530
/* Fill in *ai with the best description of address a we can manage,
   trying in order: user-defined permission ranges, the stack, the
   freed-blocks queue (use-after-free candidates), then the live
   malloc lists.  Falls back to Unknown. */
void VG_(describe_addr) ( Addr a, AddrInfo* ai )
{
   ShadowChunk* sc;
   UInt ml_no;
   Bool ok;

   /* Perhaps it's a user-def'd block ? */
   ok = VG_(client_perm_maybe_describe)( a, ai );
   if (ok)
      return;
   /* Perhaps it's on the stack? */
   if (VG_(is_plausible_stack_addr)(a)
       && a >= (Addr)VG_(baseBlock)[VGOFF_(m_esp)]) {
      ai->akind = Stack;
      return;
   }
   /* Search for a freed block which might bracket it.  The match
      window includes the red zones on either side. */
   for (sc = vg_freed_list_start; sc != NULL; sc = sc->next) {
      if (sc->data - VG_AR_CLIENT_REDZONE_SZB <= a
          && a < sc->data + sc->size + VG_AR_CLIENT_REDZONE_SZB) {
         ai->akind = Freed;
         ai->blksize = sc->size;
         /* Offset may be negative: a can fall in the leading red zone. */
         ai->rwoffset = (Int)(a) - (Int)(sc->data);
         ai->lastchange = sc->where;
         return;
      }
   }
   /* Search for a mallocd block which might bracket it. */
   for (ml_no = 0; ml_no < VG_N_MALLOCLISTS; ml_no++) {
      for (sc = vg_malloclist[ml_no]; sc != NULL; sc = sc->next) {
         if (sc->data - VG_AR_CLIENT_REDZONE_SZB <= a
             && a < sc->data + sc->size + VG_AR_CLIENT_REDZONE_SZB) {
            ai->akind = Mallocd;
            ai->blksize = sc->size;
            ai->rwoffset = (Int)(a) - (Int)(sc->data);
            ai->lastchange = sc->where;
            return;
         }
      }
   }
   /* Clueless ... */
   ai->akind = Unknown;
   return;
}
575
576/*------------------------------------------------------------*/
577/*--- Replace the C library versions with our own. Hairy. ---*/
578/*------------------------------------------------------------*/
579
580/* Below are new versions of malloc, __builtin_new, free,
581 __builtin_delete, calloc and realloc.
582
583 malloc, __builtin_new, free, __builtin_delete, calloc and realloc
584 can be entered either on the real CPU or the simulated one. If on
585 the real one, this is because the dynamic linker is running the
586 static initialisers for C++, before starting up Valgrind itself.
587 In this case it is safe to route calls through to
588 VG_(malloc)/vg_free, since that is self-initialising.
589
590 Once Valgrind is initialised, vg_running_on_simd_CPU becomes True.
591 The call needs to be transferred from the simulated CPU back to the
592 real one and routed to the vg_client_* functions. To do that, the
593 args are passed to vg_trap_here, which the simulator detects. The
594 bogus epilogue fn call is to guarantee that gcc doesn't tailcall
595 vg_trap_here, since that would cause the simulator's detection to
596 fail -- it only checks the targets of call transfers, not jumps.
597 And of course we have to be sure gcc won't inline either the
598 vg_trap_here or vg_bogus_epilogue. Ha ha ha. What a mess.
599*/
600
601/* Place afterwards to guarantee it won't get inlined ... */
602static UInt vg_trap_here_WRAPPER ( UInt arg1, UInt arg2, UInt what_to_do );
603static void vg_bogus_epilogue ( void );
604
605/* ALL calls to malloc wind up here. */
606void* malloc ( UInt n )
607{
608 if (VG_(clo_trace_malloc))
609 VG_(printf)("malloc[simd=%d](%d)",
610 (UInt)VG_(running_on_simd_CPU), n );
611
612 if (VG_(clo_sloppy_malloc)) { while ((n % 4) > 0) n++; }
613
614 vg_cmalloc_n_mallocs ++;
615 vg_cmalloc_bs_mallocd += n;
616
617 if (VG_(running_on_simd_CPU)) {
618 UInt v = vg_trap_here_WRAPPER ( 0, n, 0x4000 );
619 vg_bogus_epilogue();
620 if (VG_(clo_trace_malloc))
621 VG_(printf)(" = %p\n", v );
622 return (void*)v;
623 } else {
624 void* v = VG_(malloc)(VG_AR_CLIENT, n);
625 if (VG_(clo_trace_malloc))
626 VG_(printf)(" = %p\n", v );
627 return (void*)v;
628 }
629}
630
/* C++ operator new.  Same protocol as malloc above, but tagged with
   magic 0x4001 so mismatched deallocations can be detected. */
void* __builtin_new ( UInt n )
{
   if (VG_(clo_trace_malloc))
      VG_(printf)("__builtin_new[simd=%d](%d)",
                  (UInt)VG_(running_on_simd_CPU), n );

   /* --sloppy-malloc: round the request up to a multiple of 4. */
   if (VG_(clo_sloppy_malloc)) { while ((n % 4) > 0) n++; }

   vg_cmalloc_n_mallocs++;
   vg_cmalloc_bs_mallocd += n;

   if (VG_(running_on_simd_CPU)) {
      UInt v = vg_trap_here_WRAPPER ( 0, n, 0x4001 );
      vg_bogus_epilogue();
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return (void*)v;
   } else {
      void* v = VG_(malloc)(VG_AR_CLIENT, n);
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return v;
   }
}
655
/* C++ operator new[].  Same protocol as malloc above, but tagged
   with magic 0x4002.
   NOTE(review): parameter is Int here but UInt in the other
   allocation wrappers — presumably an oversight; confirm before
   changing the signature. */
void* __builtin_vec_new ( Int n )
{
   if (VG_(clo_trace_malloc))
      VG_(printf)("__builtin_vec_new[simd=%d](%d)",
                  (UInt)VG_(running_on_simd_CPU), n );

   /* --sloppy-malloc: round the request up to a multiple of 4. */
   if (VG_(clo_sloppy_malloc)) { while ((n % 4) > 0) n++; }

   vg_cmalloc_n_mallocs++;
   vg_cmalloc_bs_mallocd += n;

   if (VG_(running_on_simd_CPU)) {
      UInt v = vg_trap_here_WRAPPER ( 0, n, 0x4002 );
      vg_bogus_epilogue();
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return (void*)v;
   } else {
      void* v = VG_(malloc)(VG_AR_CLIENT, n);
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return v;
   }
}
680
681void free ( void* p )
682{
683 if (VG_(clo_trace_malloc))
684 VG_(printf)("free[simd=%d](%p)\n",
685 (UInt)VG_(running_on_simd_CPU), p );
686 vg_cmalloc_n_frees ++;
687
688 if (p == NULL)
689 return;
690 if (VG_(running_on_simd_CPU)) {
691 (void)vg_trap_here_WRAPPER ( 0, (UInt)p, 0x5000 );
692 vg_bogus_epilogue();
693 } else {
694 VG_(free)(VG_AR_CLIENT, p);
695 }
696}
697
/* C++ operator delete; magic 0x5001 for mismatch detection. */
void __builtin_delete ( void* p )
{
   if (VG_(clo_trace_malloc))
      VG_(printf)("__builtin_delete[simd=%d](%p)\n",
                  (UInt)VG_(running_on_simd_CPU), p );
   /* NB: counted even for delete of NULL, as with free above. */
   vg_cmalloc_n_frees ++;

   if (p == NULL)
      return;
   if (VG_(running_on_simd_CPU)) {
      (void)vg_trap_here_WRAPPER ( 0, (UInt)p, 0x5001 );
      vg_bogus_epilogue();
   } else {
      VG_(free)(VG_AR_CLIENT, p);
   }
}
714
/* C++ operator delete[]; magic 0x5002 for mismatch detection. */
void __builtin_vec_delete ( void* p )
{
   if (VG_(clo_trace_malloc))
      VG_(printf)("__builtin_vec_delete[simd=%d](%p)\n",
                  (UInt)VG_(running_on_simd_CPU), p );
   /* NB: counted even for delete[] of NULL, as with free above. */
   vg_cmalloc_n_frees ++;

   if (p == NULL)
      return;
   if (VG_(running_on_simd_CPU)) {
      (void)vg_trap_here_WRAPPER ( 0, (UInt)p, 0x5002 );
      vg_bogus_epilogue();
   } else {
      VG_(free)(VG_AR_CLIENT, p);
   }
}
731
/* ALL calls to calloc wind up here; magic 6666 routes to
   VG_(client_malloc)/VG_(client_calloc). */
void* calloc ( UInt nmemb, UInt size )
{
   if (VG_(clo_trace_malloc))
      VG_(printf)("calloc[simd=%d](%d,%d)",
                  (UInt)VG_(running_on_simd_CPU), nmemb, size );
   vg_cmalloc_n_mallocs ++;
   /* NB: size * nmemb may wrap here; that only skews this running
      statistic, since the allocator receives nmemb and size
      separately. */
   vg_cmalloc_bs_mallocd += size * nmemb;

   if (VG_(running_on_simd_CPU)) {
      UInt v = vg_trap_here_WRAPPER ( nmemb, size, 6666 );
      vg_bogus_epilogue();
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return (void*)v;
   } else {
      void* v = VG_(calloc)(VG_AR_CLIENT, nmemb, size);
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return v;
   }
}
753
/* ALL calls to realloc wind up here.  The C89 edge cases are peeled
   off first — realloc(NULL,n) == malloc(n), realloc(p,0) frees p and
   returns NULL — which is what lets VG_(client_realloc) assert they
   never reach it.  Magic 7777 routes the general case. */
void* realloc ( void* ptrV, UInt new_size )
{
   if (VG_(clo_trace_malloc))
      VG_(printf)("realloc[simd=%d](%p,%d)",
                  (UInt)VG_(running_on_simd_CPU), ptrV, new_size );

   /* --sloppy-malloc: round the request up to a multiple of 4. */
   if (VG_(clo_sloppy_malloc))
      { while ((new_size % 4) > 0) new_size++; }

   /* A realloc counts as one free plus one malloc, even on the
      NULL / zero-size paths below. */
   vg_cmalloc_n_frees ++;
   vg_cmalloc_n_mallocs ++;
   vg_cmalloc_bs_mallocd += new_size;

   if (ptrV == NULL)
      return malloc(new_size);
   if (new_size == 0) {
      free(ptrV);
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = 0\n" );
      return NULL;
   }
   if (VG_(running_on_simd_CPU)) {
      UInt v = vg_trap_here_WRAPPER ( (UInt)ptrV, new_size, 7777 );
      vg_bogus_epilogue();
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return (void*)v;
   } else {
      void* v = VG_(realloc)(VG_AR_CLIENT, ptrV, new_size);
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return v;
   }
}
788
/* ALL calls to memalign wind up here; magic 8888 routes to
   VG_(client_memalign). */
void* memalign ( Int alignment, Int n )
{
   if (VG_(clo_trace_malloc))
      VG_(printf)("memalign[simd=%d](al %d, size %d)",
                  (UInt)VG_(running_on_simd_CPU), alignment, n );

   /* --sloppy-malloc: round the request up to a multiple of 4. */
   if (VG_(clo_sloppy_malloc)) { while ((n % 4) > 0) n++; }

   vg_cmalloc_n_mallocs ++;
   vg_cmalloc_bs_mallocd += n;

   if (VG_(running_on_simd_CPU)) {
      UInt v = vg_trap_here_WRAPPER ( alignment, n, 8888 );
      vg_bogus_epilogue();
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return (void*)v;
   } else {
      void* v = VG_(malloc_aligned)(VG_AR_CLIENT, alignment, n);
      if (VG_(clo_trace_malloc))
         VG_(printf)(" = %p\n", v );
      return (void*)v;
   }
}
813
/* valloc: page-aligned allocation, implemented via memalign. */
void* valloc ( Int size )
{
   return memalign(VKI_BYTES_PER_PAGE, size);
}
818
819
/* Various compatibility wrapper functions, for glibc and libstdc++. */

/* glibc's obsolete cfree() is just free(). */
void cfree ( void* p )
{
   free ( p );
}
825
/* Stub: warn and return nothing useful.
   NOTE(review): glibc's mallinfo() returns a struct mallinfo by
   value, not a pointer, so this signature does not match the real
   one — callers that use the result will misbehave; confirm this is
   only ever linked over code that ignores it. */
void* mallinfo ( void )
{
   VG_(message)(Vg_UserMsg,
                "Warning: incorrectly-handled call to mallinfo()");
   return NULL;
}
832
833
834
/* Accept any mallopt() tuning request and report success; the
   request itself is deliberately ignored.
   In glibc-2.2.4, 1 denoted a successful return value for mallopt. */
int mallopt ( int cmd, int value )
{
   (void)cmd;
   (void)value;
   return 1;
}
840
841
/* Bomb out if we get any of these.
   NOTE(review): the real glibc versions have non-void signatures;
   these stubs only panic, so the signatures never matter — confirm
   before making any of them functional. */
void pvalloc ( void )
{ VG_(panic)("call to pvalloc\n"); }

void malloc_stats ( void )
{ VG_(panic)("call to malloc_stats\n"); }
void malloc_usable_size ( void )
{ VG_(panic)("call to malloc_usable_size\n"); }
void malloc_trim ( void )
{ VG_(panic)("call to malloc_trim\n"); }
void malloc_get_state ( void )
{ VG_(panic)("call to malloc_get_state\n"); }
void malloc_set_state ( void )
{ VG_(panic)("call to malloc_set_state\n"); }
856
857
858int __posix_memalign ( void **memptr, UInt alignment, UInt size )
859{
860 void *mem;
861
862 /* Test whether the SIZE argument is valid. It must be a power of
863 two multiple of sizeof (void *). */
864 if (size % sizeof (void *) != 0 || (size & (size - 1)) != 0)
865 return 22 /*EINVAL*/;
866
867 mem = memalign (alignment, size);
868
869 if (mem != NULL) {
870 *memptr = mem;
871 return 0;
872 }
873
874 return 12 /*ENOMEM*/;
875}
876
877
878/*------------------------------------------------------------*/
879/*--- Magic supporting hacks. ---*/
880/*------------------------------------------------------------*/
881
882extern UInt VG_(trap_here) ( UInt arg1, UInt arg2, UInt what_to_do );
883
static
UInt vg_trap_here_WRAPPER ( UInt arg1, UInt arg2, UInt what_to_do )
{
   /* The point of this idiocy is to make a plain, ordinary call to
      vg_trap_here which vg_dispatch_when_CALL can spot.  Left to
      itself, with -fpic, gcc generates "call vg_trap_here@PLT" which
      doesn't get spotted, for whatever reason.  I guess I could check
      _all_ control flow transfers, but that would be an undesirable
      performance overhead.

      If you compile without -fpic, gcc generates the obvious call
      insn, so the wrappers below will work if they just call
      vg_trap_here.  But I don't want to rule out building with -fpic,
      hence this hack.  Sigh.
   */
   UInt v;

#  define WHERE_TO VG_(trap_here)
#  define STRINGIFY(xx) __STRING(xx)

   /* Hand-build the 3-argument cdecl call (args pushed right to
      left, caller pops, result in %eax) so it is emitted as a plain
      "call" instruction the dispatcher can recognise. */
   asm("# call to vg_trap_here\n"
       "\t pushl %3\n"
       "\t pushl %2\n"
       "\t pushl %1\n"
       "\t call " STRINGIFY(WHERE_TO) "\n"
       "\t addl $12, %%esp\n"
       "\t movl %%eax, %0\n"
       : "=r" (v)
       : "r" (arg1), "r" (arg2), "r" (what_to_do)
       : "eax", "esp", "cc", "memory");
   return v;

#  undef WHERE_TO
#  undef STRINGIFY
}
919
/* Last, but not least ... */
/* Deliberately-empty function called right after each
   vg_trap_here_WRAPPER call, purely to stop gcc turning that call
   into a tail call (see the long comment above). */
void vg_bogus_epilogue ( void )
{
   /* Runs on simulated CPU only. */
}
925
/* The trap target.  Calls to this fn are detected in vg_dispatch.S
   and are handled specially (rerouted to the VG_(client_*) fns on
   the real CPU), so this body should never actually execute. */
UInt VG_(trap_here) ( UInt arg1, UInt arg2, UInt what_to_do )
{
   /* Calls to this fn are detected in vg_dispatch.S and are handled
      specially.  So this fn should never be entered. */
   VG_(panic)("vg_trap_here called!");
   return 0; /*NOTREACHED*/
}
933
934
935/*--------------------------------------------------------------------*/
936/*--- end vg_clientmalloc.c ---*/
937/*--------------------------------------------------------------------*/