
/*--------------------------------------------------------------------*/
/*--- malloc/free wrappers for detecting errors and updating bits. ---*/
/*---                                         mc_malloc_wrappers.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2013 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_execontext.h"
#include "pub_tool_poolalloc.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"     // Needed for mc_include.h
#include "pub_tool_stacktrace.h"    // For VG_(get_and_pp_StackTrace)

#include "mc_include.h"

/*------------------------------------------------------------*/
/*--- Defns                                                 ---*/
/*------------------------------------------------------------*/

/* Stats ... */
static SizeT cmalloc_n_mallocs  = 0;
static SizeT cmalloc_n_frees    = 0;
static ULong cmalloc_bs_mallocd = 0;

/* For debug printing to do with mempools: what stack trace
   depth to show. */
#define MEMPOOL_DEBUG_STACKTRACE_DEPTH 16


/*------------------------------------------------------------*/
/*--- Tracking malloc'd and free'd blocks                   ---*/
/*------------------------------------------------------------*/

SizeT MC_(Malloc_Redzone_SzB) = -10000000; // If used before set, should BOMB

/* Record malloc'd blocks. */
VgHashTable *MC_(malloc_list) = NULL;

/* Memory pools: a hash table of MC_Mempools.  Search key is
   MC_Mempool::pool. */
VgHashTable *MC_(mempool_list) = NULL;

/* Pool allocator for MC_Chunk. */
PoolAlloc *MC_(chunk_poolalloc) = NULL;
static
MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
                            MC_AllocKind kind);
static inline
void delete_MC_Chunk (MC_Chunk* mc);

/* Records blocks after freeing. */
/* Blocks freed by the client are queued in one of two lists of
   freed blocks not yet physically freed:
   - the "big blocks" freed list
   - the "small blocks" freed list
   Blocks with a size >= MC_(clo_freelist_big_blocks)
   are linked in the big blocks freed list.
   This allows a client to allocate and free big blocks
   (e.g. bigger than VG_(clo_freelist_vol)) without immediately
   losing all protection against dangling pointers.
   Position [0] is for big blocks, [1] is for small blocks. */
static MC_Chunk* freed_list_start[2]  = {NULL, NULL};
static MC_Chunk* freed_list_end[2]    = {NULL, NULL};

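/* Illustrative client-side sketch (not part of this file): because
   freed blocks linger on the queues above, marked noaccess, a stale
   access after free() is still reported as an invalid access instead
   of silently hitting recycled memory.  A hypothetical standalone
   program run under Memcheck:

      #include <stdlib.h>

      int main(void)
      {
         int* p = malloc(100 * sizeof(int));
         free(p);      // chunk queued on a freed list, marked noaccess
         return p[1];  // Memcheck reports an invalid read of size 4
      }

   Raising --freelist-vol keeps blocks queued for longer; blocks of
   --freelist-big-blocks bytes or more go on the "big blocks" list so
   that a few huge frees do not flush all the small ones. */
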
/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time. */
static void add_to_freed_queue ( MC_Chunk* mc )
{
   const Bool show = False;
   const int l = (mc->szB >= MC_(clo_freelist_big_blocks) ? 0 : 1);

   /* Put it at the end of the freed list, unless the block
      would be directly released anyway: in that case, put
      it at the head of the freed list. */
   if (freed_list_end[l] == NULL) {
      tl_assert(freed_list_start[l] == NULL);
      mc->next = NULL;
      freed_list_end[l] = freed_list_start[l] = mc;
   } else {
      tl_assert(freed_list_end[l]->next == NULL);
      if (mc->szB >= MC_(clo_freelist_vol)) {
         mc->next = freed_list_start[l];
         freed_list_start[l] = mc;
      } else {
         mc->next = NULL;
         freed_list_end[l]->next = mc;
         freed_list_end[l] = mc;
      }
   }
   VG_(free_queue_volume) += (Long)mc->szB;
   if (show)
      VG_(printf)("mc_freelist: acquire: volume now %lld\n",
                  VG_(free_queue_volume));
   VG_(free_queue_length)++;
}

/* Release enough of the oldest blocks to bring the free queue
   volume below MC_(clo_freelist_vol).
   Start with the big blocks list first.
   On entry, VG_(free_queue_volume) must be > MC_(clo_freelist_vol).
   On exit, VG_(free_queue_volume) will be <= MC_(clo_freelist_vol). */
static void release_oldest_block(void)
{
   const Bool show = False;
   int i;
   tl_assert (VG_(free_queue_volume) > MC_(clo_freelist_vol));
   tl_assert (freed_list_start[0] != NULL || freed_list_start[1] != NULL);

   for (i = 0; i < 2; i++) {
      while (VG_(free_queue_volume) > MC_(clo_freelist_vol)
             && freed_list_start[i] != NULL) {
         MC_Chunk* mc1;

         tl_assert(freed_list_end[i] != NULL);

         mc1 = freed_list_start[i];
         VG_(free_queue_volume) -= (Long)mc1->szB;
         VG_(free_queue_length)--;
         if (show)
            VG_(printf)("mc_freelist: discard: volume now %lld\n",
                        VG_(free_queue_volume));
         tl_assert(VG_(free_queue_volume) >= 0);

         if (freed_list_start[i] == freed_list_end[i]) {
            freed_list_start[i] = freed_list_end[i] = NULL;
         } else {
            freed_list_start[i] = mc1->next;
         }
         mc1->next = NULL; /* just paranoia */

         /* free MC_Chunk */
         if (MC_AllocCustom != mc1->allockind)
            VG_(cli_free) ( (void*)(mc1->data) );
         delete_MC_Chunk ( mc1 );
      }
   }
}

MC_Chunk* MC_(get_freed_block_bracketting) (Addr a)
{
   int i;
   for (i = 0; i < 2; i++) {
      MC_Chunk* mc;
      mc = freed_list_start[i];
      while (mc) {
         if (VG_(addr_is_in_block)( a, mc->data, mc->szB,
                                    MC_(Malloc_Redzone_SzB) ))
            return mc;
         mc = mc->next;
      }
   }
   return NULL;
}

/* Allocate a shadow chunk, put it on the appropriate list.
   If needed, release oldest blocks from freed list. */
static
MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
                            MC_AllocKind kind)
{
   MC_Chunk* mc  = VG_(allocEltPA)(MC_(chunk_poolalloc));
   mc->data      = p;
   mc->szB       = szB;
   mc->allockind = kind;
   switch ( MC_(n_where_pointers)() ) {
      case 2: mc->where[1] = 0; // fall through to case 1
      case 1: mc->where[0] = 0; // fall through to case 0
      case 0: break;
      default: tl_assert(0);
   }
   MC_(set_allocated_at) (tid, mc);

   /* Each time a new MC_Chunk is created, release oldest blocks
      if the free list volume is exceeded. */
   if (VG_(free_queue_volume) > MC_(clo_freelist_vol))
      release_oldest_block();

   /* Paranoia ... ensure the MC_Chunk is off-limits to the client, so
      the mc->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by VG_(malloc)
      should be noaccess as far as the client is concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mc, sizeof(MC_Chunk), NULL )) {
      VG_(tool_panic)("create_MC_Chunk: shadow area is accessible");
   }
   return mc;
}

static inline
void delete_MC_Chunk (MC_Chunk* mc)
{
   VG_(freeEltPA) (MC_(chunk_poolalloc), mc);
}

// True if mc is in the given block list.
static Bool in_block_list (const VgHashTable *block_list, MC_Chunk* mc)
{
   MC_Chunk* found_mc = VG_(HT_lookup) ( block_list, (UWord)mc->data );
   if (found_mc) {
      tl_assert (found_mc->data == mc->data);
      /* If a user builds a pool from a malloc-ed superblock
         and uses VALGRIND_MALLOCLIKE_BLOCK to "mark"
         an address at the beginning of this superblock, then
         this address will appear twice in the block_list.
         We handle this case by checking size and allockind.
         Note: I suspect that having the same block
         twice in MC_(malloc_list) is a recipe for bugs.
         It might be better to create a "standard" mempool to
         handle all this more cleanly. */
      if (found_mc->szB != mc->szB
          || found_mc->allockind != mc->allockind)
         return False;
      tl_assert (found_mc == mc);
      return True;
   } else
      return False;
}

// True if mc is a live block (not yet freed).
static Bool live_block (MC_Chunk* mc)
{
   if (mc->allockind == MC_AllocCustom) {
      MC_Mempool* mp;
      VG_(HT_ResetIter)(MC_(mempool_list));
      while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
         if ( in_block_list (mp->chunks, mc) )
            return True;
      }
   }
   /* Note: we fall back to MC_(malloc_list) for an MC_AllocCustom
      block not found in any mempool, as such a block can be
      inserted in MC_(malloc_list) by VALGRIND_MALLOCLIKE_BLOCK. */
   return in_block_list ( MC_(malloc_list), mc );
}

ExeContext* MC_(allocated_at) (MC_Chunk* mc)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return VG_(null_ExeContext) ();
      case KS_alloc:           return mc->where[0];
      case KS_free:            return VG_(null_ExeContext) ();
      case KS_alloc_then_free: return (live_block(mc) ?
                                       mc->where[0] : VG_(null_ExeContext) ());
      case KS_alloc_and_free:  return mc->where[0];
      default: tl_assert (0);
   }
}

ExeContext* MC_(freed_at) (MC_Chunk* mc)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return VG_(null_ExeContext) ();
      case KS_alloc:           return VG_(null_ExeContext) ();
      case KS_free:            return (mc->where[0] ?
                                       mc->where[0] : VG_(null_ExeContext) ());
      case KS_alloc_then_free: return (live_block(mc) ?
                                       VG_(null_ExeContext) () : mc->where[0]);
      case KS_alloc_and_free:  return (mc->where[1] ?
                                       mc->where[1] : VG_(null_ExeContext) ());
      default: tl_assert (0);
   }
}

void MC_(set_allocated_at) (ThreadId tid, MC_Chunk* mc)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return;
      case KS_alloc:           break;
      case KS_free:            return;
      case KS_alloc_then_free: break;
      case KS_alloc_and_free:  break;
      default: tl_assert (0);
   }
   mc->where[0] = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
}

void MC_(set_freed_at) (ThreadId tid, MC_Chunk* mc)
{
   UInt pos;
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return;
      case KS_alloc:           return;
      case KS_free:            pos = 0; break;
      case KS_alloc_then_free: pos = 0; break;
      case KS_alloc_and_free:  pos = 1; break;
      default: tl_assert (0);
   }
   mc->where[pos] = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
}

UInt MC_(n_where_pointers) (void)
{
   switch (MC_(clo_keep_stacktraces)) {
      case KS_none:            return 0;
      case KS_alloc:
      case KS_free:
      case KS_alloc_then_free: return 1;
      case KS_alloc_and_free:  return 2;
      default: tl_assert (0);
   }
}

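/* Informational summary of the where[] protocol implied by the
   functions above, keyed on Memcheck's --keep-stacktraces option:

      --keep-stacktraces=none             no where[] slots used
      --keep-stacktraces=alloc            where[0] = alloc stack
      --keep-stacktraces=free             where[0] = free stack
      --keep-stacktraces=alloc-then-free  where[0] = alloc stack, later
                                          overwritten by the free stack
                                          when the block is freed
      --keep-stacktraces=alloc-and-free   where[0] = alloc stack,
                                          where[1] = free stack

   e.g. "valgrind --keep-stacktraces=alloc-and-free ./a.out" keeps both
   stacks, at the cost of one extra ExeContext pointer per block. */
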
/*------------------------------------------------------------*/
/*--- client_malloc(), etc                                  ---*/
/*------------------------------------------------------------*/

/* Allocate memory and note change in memory available */
void* MC_(new_block) ( ThreadId tid,
                       Addr p, SizeT szB, SizeT alignB,
                       Bool is_zeroed, MC_AllocKind kind, VgHashTable *table)
{
   MC_Chunk* mc;

   // Allocate and zero if necessary
   if (p) {
      tl_assert(MC_AllocCustom == kind);
   } else {
      tl_assert(MC_AllocCustom != kind);
      p = (Addr)VG_(cli_malloc)( alignB, szB );
      if (!p) {
         return NULL;
      }
      if (is_zeroed) {
         VG_(memset)((void*)p, 0, szB);
      } else
      if (MC_(clo_malloc_fill) != -1) {
         tl_assert(MC_(clo_malloc_fill) >= 0x00 && MC_(clo_malloc_fill) <= 0xFF);
         VG_(memset)((void*)p, MC_(clo_malloc_fill), szB);
      }
   }

   // Only update stats if allocation succeeded.
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)szB;
   mc = create_MC_Chunk (tid, p, szB, kind);
   VG_(HT_add_node)( table, mc );

   if (is_zeroed)
      MC_(make_mem_defined)( p, szB );
   else {
      UInt ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(mc));
      tl_assert(VG_(is_plausible_ECU)(ecu));
      MC_(make_mem_undefined_w_otag)( p, szB, ecu | MC_OKIND_HEAP );
   }

   return (void*)p;
}

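/* Illustrative client-side sketch (not part of this file): the p != 0
   path above serves custom allocators that announce their blocks via
   client requests from valgrind.h; such blocks arrive here with kind
   MC_AllocCustom.  A hypothetical wrapper around a home-grown
   allocator (my_arena_alloc/my_arena_free are assumed names):

      #include "valgrind/valgrind.h"

      void* my_malloc(size_t n)
      {
         void* p = my_arena_alloc(n);            // assumed allocator
         // args: addr, sizeB, rzB = 0, is_zeroed = 0
         VALGRIND_MALLOCLIKE_BLOCK(p, n, 0, 0);
         return p;
      }

      void my_free(void* p)
      {
         VALGRIND_FREELIKE_BLOCK(p, 0);          // rzB = 0
         my_arena_free(p);                       // assumed allocator
      }
*/
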
void* MC_(malloc) ( ThreadId tid, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "malloc", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }
}

void* MC_(__builtin_new) ( ThreadId tid, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "__builtin_new", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocNew, MC_(malloc_list));
   }
}

void* MC_(__builtin_vec_new) ( ThreadId tid, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "__builtin_vec_new", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocNewVec, MC_(malloc_list));
   }
}

void* MC_(memalign) ( ThreadId tid, SizeT alignB, SizeT n )
{
   if (MC_(record_fishy_value_error)(tid, "memalign", "size", n)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, alignB,
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }
}

void* MC_(calloc) ( ThreadId tid, SizeT nmemb, SizeT size1 )
{
   if (MC_(record_fishy_value_error)(tid, "calloc", "nmemb", nmemb) ||
       MC_(record_fishy_value_error)(tid, "calloc", "size", size1)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, nmemb*size1, VG_(clo_alignment),
         /*is_zeroed*/True, MC_AllocMalloc, MC_(malloc_list));
   }
}

static
void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
{
   /* Note: we do not free-fill the custom allocs produced
      by MEMPOOL or by MALLOC/FREELIKE_BLOCK requests. */
   if (MC_(clo_free_fill) != -1 && MC_AllocCustom != mc->allockind ) {
      tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
      VG_(memset)((void*)mc->data, MC_(clo_free_fill), mc->szB);
   }

   /* Note: make redzones noaccess again -- just in case user made them
      accessible with a client request... */
   MC_(make_mem_noaccess)( mc->data-rzB, mc->szB + 2*rzB );

   /* Record where freed */
   MC_(set_freed_at) (tid, mc);
   /* Put it out of harm's way for a while */
   add_to_freed_queue ( mc );
   /* If the free list volume is bigger than MC_(clo_freelist_vol),
      we wait till the next block allocation to release blocks.
      This increases the chance of discovering dangling pointer usage,
      even for big blocks being freed by the client. */
}


static
void record_freemismatch_error (ThreadId tid, MC_Chunk* mc)
{
   /* Only show such an error if the user hasn't disabled doing so. */
   if (!MC_(clo_show_mismatched_frees))
      return;

   /* MC_(record_freemismatch_error) reports errors for still
      allocated blocks but we are in the middle of freeing it.  To
      report the error correctly, we re-insert the chunk (making it
      again a "clean allocated block"), report the error, and then
      re-remove the chunk.  This avoids doing a VG_(HT_lookup)
      followed by a VG_(HT_remove) in all "non-erroneous cases". */
   VG_(HT_add_node)( MC_(malloc_list), mc );
   MC_(record_freemismatch_error) ( tid, mc );
   if ((mc != VG_(HT_remove) ( MC_(malloc_list), (UWord)mc->data )))
      tl_assert(0);
}

void MC_(handle_free) ( ThreadId tid, Addr p, UInt rzB, MC_AllocKind kind )
{
   MC_Chunk* mc;

   cmalloc_n_frees++;

   mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p );
   if (mc == NULL) {
      MC_(record_free_error) ( tid, p );
   } else {
      /* check if it is a matching free() / delete / delete [] */
      if (kind != mc->allockind) {
         tl_assert(p == mc->data);
         record_freemismatch_error ( tid, mc );
      }
      die_and_free_mem ( tid, mc, rzB );
   }
}

void MC_(free) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocMalloc );
}

void MC_(__builtin_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNew);
}

void MC_(__builtin_vec_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNewVec);
}

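/* Illustrative client-side sketch (not part of this file): the kind
   check in MC_(handle_free) above is what catches allocator versus
   deallocator mismatches.  A hypothetical C++ program run under
   Memcheck:

      int main()
      {
         int* a = new int[10];
         delete a;    // should be: delete [] a
                      // Memcheck: Mismatched free() / delete / delete []
         return 0;
      }

   The report can be suppressed with --show-mismatched-frees=no, which
   is what MC_(clo_show_mismatched_frees) above reflects. */
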
void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
{
   MC_Chunk* old_mc;
   MC_Chunk* new_mc;
   Addr      a_new;
   SizeT     old_szB;

   if (MC_(record_fishy_value_error)(tid, "realloc", "size", new_szB))
      return NULL;

   cmalloc_n_frees ++;
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)new_szB;

   /* Remove the old block */
   old_mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p_old );
   if (old_mc == NULL) {
      MC_(record_free_error) ( tid, (Addr)p_old );
      /* We return to the program regardless. */
      return NULL;
   }

   /* check if it's a matching free() / delete / delete [] */
   if (MC_AllocMalloc != old_mc->allockind) {
      /* cannot realloc a range that was allocated with new or new [] */
      tl_assert((Addr)p_old == old_mc->data);
      record_freemismatch_error ( tid, old_mc );
      /* but keep going anyway */
   }

   old_szB = old_mc->szB;

   /* Get new memory */
   a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

   if (a_new) {
      /* In all cases, even when the new size is smaller or unchanged, we
         reallocate and copy the contents, and make the old block
         inaccessible.  This is so as to guarantee to catch all cases of
         accesses via the old address after reallocation, regardless of
         the change in size.  (Of course the ability to detect accesses
         to the old block also depends on the size of the freed blocks
         queue). */

      // Allocate a new chunk.
      new_mc = create_MC_Chunk( tid, a_new, new_szB, MC_AllocMalloc );

      // Now insert the new mc (with a new 'data' field) into malloc_list.
      VG_(HT_add_node)( MC_(malloc_list), new_mc );

      /* Retained part is copied, red zones set as normal */

      /* Redzone at the front */
      MC_(make_mem_noaccess)( a_new-MC_(Malloc_Redzone_SzB),
                              MC_(Malloc_Redzone_SzB) );

      /* payload */
      if (old_szB >= new_szB) {
         /* new size is smaller or the same */

         /* Copy address range state and value from old to new */
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
         VG_(memcpy)((void*)a_new, p_old, new_szB);
      } else {
         /* new size is bigger */
         UInt ecu;

         /* Copy address range state and value from old to new */
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, old_szB );
         VG_(memcpy)((void*)a_new, p_old, old_szB);

         // If the block has grown, we mark the grown area as undefined.
         // We have to do that after VG_(HT_add_node) to ensure the ecu
         // execontext is for a fully allocated block.
         ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(new_mc));
         tl_assert(VG_(is_plausible_ECU)(ecu));
         MC_(make_mem_undefined_w_otag)( a_new+old_szB,
                                         new_szB-old_szB,
                                         ecu | MC_OKIND_HEAP );

         /* Possibly fill new area with specified junk */
         if (MC_(clo_malloc_fill) != -1) {
            tl_assert(MC_(clo_malloc_fill) >= 0x00
                      && MC_(clo_malloc_fill) <= 0xFF);
            VG_(memset)((void*)(a_new+old_szB), MC_(clo_malloc_fill),
                        new_szB-old_szB);
         }
      }

      /* Redzone at the back. */
      MC_(make_mem_noaccess) ( a_new+new_szB, MC_(Malloc_Redzone_SzB));

      /* Possibly fill freed area with specified junk. */
      if (MC_(clo_free_fill) != -1) {
         tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
         VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
      }

      /* Free old memory */
      /* Nb: we have to allocate a new MC_Chunk for the new memory rather
         than recycling the old one, so that any erroneous accesses to the
         old memory are reported. */
      die_and_free_mem ( tid, old_mc, MC_(Malloc_Redzone_SzB) );

   } else {
      /* Could not allocate new client memory.
         Re-insert the old_mc (with the old ptr) in the HT, as old_mc was
         unconditionally removed at the beginning of the function. */
      VG_(HT_add_node)( MC_(malloc_list), old_mc );
   }

   return (void*)a_new;
}

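/* Illustrative client-side sketch (not part of this file): because
   MC_(realloc) above always moves the block, even when shrinking, a
   stale pointer kept across realloc() is reliably flagged.  A
   hypothetical standalone program run under Memcheck:

      #include <stdlib.h>

      int main(void)
      {
         char* p = malloc(64);
         char* q = realloc(p, 32);  // block is copied to a new address
         p[0] = 'x';                // Memcheck: invalid write (use of p
                                    // after realloc); q is the live block
         free(q);
         return 0;
      }
*/
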
SizeT MC_(malloc_usable_size) ( ThreadId tid, void* p )
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );

   // There may be slop, but pretend there isn't because only the asked-for
   // area will be marked as addressable.
   return ( mc ? mc->szB : 0 );
}

/* This handles the in place resize of a block, as performed by the
   VALGRIND_RESIZEINPLACE_BLOCK client request.  It is unrelated to,
   and not used for, handling of the normal libc realloc()
   function. */
void MC_(handle_resizeInPlace)(ThreadId tid, Addr p,
                               SizeT oldSizeB, SizeT newSizeB, SizeT rzB)
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
   if (!mc || mc->szB != oldSizeB || newSizeB == 0) {
      /* Reject if: p is not found, or oldSizeB is wrong,
         or new block would be empty. */
      MC_(record_free_error) ( tid, p );
      return;
   }

   if (oldSizeB == newSizeB)
      return;

   mc->szB = newSizeB;
   if (newSizeB < oldSizeB) {
      MC_(make_mem_noaccess)( p + newSizeB, oldSizeB - newSizeB + rzB );
   } else {
      ExeContext* ec  = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
      UInt        ecu = VG_(get_ECU_from_ExeContext)(ec);
      MC_(make_mem_undefined_w_otag)( p + oldSizeB, newSizeB - oldSizeB,
                                      ecu | MC_OKIND_HEAP );
      if (rzB > 0)
         MC_(make_mem_noaccess)( p + newSizeB, rzB );
   }
}

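/* Illustrative client-side sketch (not part of this file): how a
   custom allocator that grows a block in place, without moving it,
   might tell Memcheck about the resize (my_arena_extend is an
   assumed name; the block was registered earlier with
   VALGRIND_MALLOCLIKE_BLOCK):

      #include "valgrind/valgrind.h"

      void my_grow_in_place(void* p, size_t old_n, size_t new_n)
      {
         my_arena_extend(p, new_n);   // assumed allocator call
         // args: addr, oldSizeB, newSizeB, rzB = 0
         VALGRIND_RESIZEINPLACE_BLOCK(p, old_n, new_n, 0);
      }

   The oldSizeB passed must match what Memcheck has recorded and
   newSizeB must be non-zero, otherwise the request is rejected with a
   free error, as coded above. */
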

/*------------------------------------------------------------*/
/*--- Memory pool stuff.                                    ---*/
/*------------------------------------------------------------*/

/* Set to 1 for intensive sanity checking.  Is very expensive though
   and should not be used in production scenarios.  See #255966. */
#define MP_DETAILED_SANITY_CHECKS 0

static void check_mempool_sane(MC_Mempool* mp); /*forward*/


void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "create_mempool(0x%lx, %d, %d)\n",
                               pool, rzB, is_zeroed);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp != NULL) {
      VG_(tool_panic)("MC_(create_mempool): duplicate pool creation");
   }

   mp = VG_(malloc)("mc.cm.1", sizeof(MC_Mempool));
   mp->pool      = pool;
   mp->rzB       = rzB;
   mp->is_zeroed = is_zeroed;
   mp->chunks    = VG_(HT_construct)( "MC_(create_mempool)" );
   check_mempool_sane(mp);

   /* Paranoia ... ensure this area is off-limits to the client, so
      the mp->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by
      VG_(malloc) should be noaccess as far as the client is
      concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mp, sizeof(MC_Mempool), NULL )) {
      VG_(tool_panic)("MC_(create_mempool): shadow area is accessible");
   }

   VG_(HT_add_node)( MC_(mempool_list), mp );
}

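/* Illustrative client-side sketch (not part of this file): the usual
   lifecycle of the mempool client requests handled below, for a
   hypothetical arena allocator carving blocks out of one superblock
   (arena_carve is an assumed name):

      #include "valgrind/memcheck.h"

      static char superblock[10000];   // also used as the pool handle

      void pool_init(void)
      {
         // args: pool, rzB = 0, is_zeroed = 0
         VALGRIND_CREATE_MEMPOOL(superblock, 0, 0);
         VALGRIND_MAKE_MEM_NOACCESS(superblock, sizeof(superblock));
      }

      void* pool_alloc(size_t n)
      {
         void* p = arena_carve(superblock, n);   // assumed arena logic
         VALGRIND_MEMPOOL_ALLOC(superblock, p, n);
         return p;
      }

      void pool_release(void* p)
      {
         VALGRIND_MEMPOOL_FREE(superblock, p);
      }

      void pool_done(void)
      {
         VALGRIND_DESTROY_MEMPOOL(superblock);
      }
*/
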
void MC_(destroy_mempool)(Addr pool)
{
   MC_Chunk*   mc;
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "destroy_mempool(0x%lx)\n", pool);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)pool );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, pool );
      return;
   }
   check_mempool_sane(mp);

   // Clean up the chunks, one by one
   VG_(HT_ResetIter)(mp->chunks);
   while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
      /* Note: make redzones noaccess again -- just in case user made them
         accessible with a client request... */
      MC_(make_mem_noaccess)(mc->data-mp->rzB, mc->szB + 2*mp->rzB );
   }
   // Destroy the chunk table
   VG_(HT_destruct)(mp->chunks, (void (*)(void *))delete_MC_Chunk);

   VG_(free)(mp);
}

static Int
mp_compar(const void* n1, const void* n2)
{
   const MC_Chunk* mc1 = *(const MC_Chunk *const *)n1;
   const MC_Chunk* mc2 = *(const MC_Chunk *const *)n2;
   if (mc1->data < mc2->data) return -1;
   if (mc1->data > mc2->data) return  1;
   return 0;
}

static void
check_mempool_sane(MC_Mempool* mp)
{
   UInt n_chunks, i, bad = 0;
   static UInt tick = 0;

   MC_Chunk **chunks = (MC_Chunk**) VG_(HT_to_array)( mp->chunks, &n_chunks );
   if (!chunks)
      return;

   if (VG_(clo_verbosity) > 1) {
      if (tick++ >= 10000)
      {
         UInt total_pools = 0, total_chunks = 0;
         MC_Mempool* mp2;

         VG_(HT_ResetIter)(MC_(mempool_list));
         while ( (mp2 = VG_(HT_Next)(MC_(mempool_list))) ) {
            total_pools++;
            VG_(HT_ResetIter)(mp2->chunks);
            while (VG_(HT_Next)(mp2->chunks)) {
               total_chunks++;
            }
         }

         VG_(message)(Vg_UserMsg,
                      "Total mempools active: %d pools, %d chunks\n",
                      total_pools, total_chunks);
         tick = 0;
      }
   }


   VG_(ssort)((void*)chunks, n_chunks, sizeof(VgHashNode*), mp_compar);

   /* Sanity check; assert that the blocks are now in order */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data > chunks[i+1]->data) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d is out of order "
                      "wrt. its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   /* Sanity check -- make sure they don't overlap */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data + chunks[i]->szB > chunks[i+1]->data ) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d overlaps with its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   if (bad) {
      VG_(message)(Vg_UserMsg,
                   "Bad mempool (%d chunks), dumping chunks for inspection:\n",
                   n_chunks);
      for (i = 0; i < n_chunks; ++i) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d: %ld bytes "
                      "[%lx,%lx), allocated:\n",
                      i+1,
                      n_chunks,
                      chunks[i]->szB + 0UL,
                      chunks[i]->data,
                      chunks[i]->data + chunks[i]->szB);

         VG_(pp_ExeContext)(MC_(allocated_at)(chunks[i]));
      }
   }
   VG_(free)(chunks);
}

void MC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %ld)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup) ( MC_(mempool_list), (UWord)pool );
   if (mp == NULL) {
      MC_(record_illegal_mempool_error) ( tid, pool );
   } else {
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
      MC_(new_block)(tid, addr, szB, /*ignored*/0, mp->is_zeroed,
                     MC_AllocCustom, mp->chunks);
      if (mp->rzB > 0) {
         // This is not needed if the user application has properly
         // marked the superblock noaccess when defining the mempool.
         // We however still mark the redzones noaccess to still catch
         // some bugs if user forgot.
         MC_(make_mem_noaccess) ( addr - mp->rzB, mp->rzB);
         MC_(make_mem_noaccess) ( addr + szB, mp->rzB);
      }
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   }
}

void MC_(mempool_free)(Addr pool, Addr addr)
{
   MC_Mempool* mp;
   MC_Chunk*   mc;
   ThreadId    tid = VG_(get_running_tid)();

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_free(0x%lx, 0x%lx)\n", pool, addr);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   mc = VG_(HT_remove)(mp->chunks, (UWord)addr);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addr);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
                   "mempool_free(0x%lx, 0x%lx) freed chunk of %ld bytes\n",
                   pool, addr, mc->szB + 0UL);
   }

   die_and_free_mem ( tid, mc, mp->rzB );
   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
}


void MC_(mempool_trim)(Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();
   UInt         n_shadows, i;
   VgHashNode** chunks;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %ld)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);
   chunks = VG_(HT_to_array) ( mp->chunks, &n_shadows );
   if (n_shadows == 0) {
      tl_assert(chunks == NULL);
      return;
   }

   tl_assert(chunks != NULL);
   for (i = 0; i < n_shadows; ++i) {

      Addr lo, hi, min, max;

      mc = (MC_Chunk*) chunks[i];

      lo = mc->data;
      hi = mc->szB == 0 ? mc->data : mc->data + mc->szB - 1;

#define EXTENT_CONTAINS(x) ((addr <= (x)) && ((x) < addr + szB))

      if (EXTENT_CONTAINS(lo) && EXTENT_CONTAINS(hi)) {

         /* The current chunk is entirely within the trim extent: keep
            it. */

         continue;

      } else if ( (! EXTENT_CONTAINS(lo)) &&
                  (! EXTENT_CONTAINS(hi)) ) {

         /* The current chunk is entirely outside the trim extent:
            delete it. */

         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)(chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }
         die_and_free_mem ( tid, mc, mp->rzB );

      } else {

         /* The current chunk intersects the trim extent: remove,
            trim, and reinsert it. */

         tl_assert(EXTENT_CONTAINS(lo) ||
                   EXTENT_CONTAINS(hi));
         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)(chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }

         /* [lo, hi) is the intersection of the chunk with the trim
            extent; [min, max) is their union.  Note: the chunk's
            upper end is mc->data + mc->szB, not mc->data + szB. */
         if (mc->data < addr) {
            min = mc->data;
            lo = addr;
         } else {
            min = addr;
            lo = mc->data;
         }

         if (mc->data + mc->szB > addr + szB) {
            max = mc->data + mc->szB;
            hi = addr + szB;
         } else {
            max = addr + szB;
            hi = mc->data + mc->szB;
         }

         tl_assert(min <= lo);
         tl_assert(lo < hi);
         tl_assert(hi <= max);

         if (min < lo && !EXTENT_CONTAINS(min)) {
            MC_(make_mem_noaccess)( min, lo - min);
         }

         if (hi < max && !EXTENT_CONTAINS(max)) {
            MC_(make_mem_noaccess)( hi, max - hi );
         }

         mc->data = lo;
         mc->szB = (UInt) (hi - lo);
         VG_(HT_add_node)( mp->chunks, mc );
      }

#undef EXTENT_CONTAINS

   }
   check_mempool_sane(mp);
   VG_(free)(chunks);
}

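/* Illustrative client-side sketch (not part of this file): a pool
   that, in one request, discards every chunk outside a surviving
   range, which is what MC_(mempool_trim) above implements.  Chunks
   wholly outside [keep, keep+keep_len) are treated as freed;
   straddling chunks are clipped to the surviving range:

      #include "valgrind/valgrind.h"

      void pool_reset_except(void* pool, void* keep, size_t keep_len)
      {
         VALGRIND_MEMPOOL_TRIM(pool, keep, keep_len);
      }
*/
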
void MC_(move_mempool)(Addr poolA, Addr poolB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "move_mempool(0x%lx, 0x%lx)\n", poolA, poolB);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)poolA );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, poolA );
      return;
   }

   mp->pool = poolB;
   VG_(HT_add_node)( MC_(mempool_list), mp );
}

void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT szB)
{
   MC_Mempool* mp;
   MC_Chunk*   mc;
   ThreadId    tid = VG_(get_running_tid)();

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %ld)\n",
                   pool, addrA, addrB, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);

   mc = VG_(HT_remove)(mp->chunks, (UWord)addrA);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addrA);
      return;
   }

   mc->data = addrB;
   mc->szB  = szB;
   VG_(HT_add_node)( mp->chunks, mc );

   check_mempool_sane(mp);
}

Bool MC_(mempool_exists)(Addr pool)
{
   MC_Mempool* mp;

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      return False;
   }
   return True;
}


/*------------------------------------------------------------*/
/*--- Statistics printing                                   ---*/
/*------------------------------------------------------------*/

void MC_(print_malloc_stats) ( void )
{
   MC_Chunk* mc;
   SizeT     nblocks = 0;
   ULong     nbytes  = 0;

   if (VG_(clo_verbosity) == 0)
      return;
   if (VG_(clo_xml))
      return;

   /* Count memory still in use. */
   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      nblocks++;
      nbytes += (ULong)mc->szB;
   }

   VG_(umsg)(
      "HEAP SUMMARY:\n"
      "    in use at exit: %'llu bytes in %'lu blocks\n"
      "  total heap usage: %'lu allocs, %'lu frees, %'llu bytes allocated\n"
      "\n",
      nbytes, nblocks,
      cmalloc_n_mallocs,
      cmalloc_n_frees, cmalloc_bs_mallocd
   );
}

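/* For reference, the VG_(umsg) call above renders like this at exit
   (numbers are illustrative; the %' flag adds thousands separators):

      HEAP SUMMARY:
          in use at exit: 1,024 bytes in 2 blocks
        total heap usage: 15 allocs, 13 frees, 8,344 bytes allocated
*/
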
SizeT MC_(get_cmalloc_n_frees) ( void )
{
   return cmalloc_n_frees;
}


/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/