blob: 2e8b78103c6d9a73a3b4d1f6e95ab732a4e61ef3 [file] [log] [blame]
njn1d0825f2006-03-27 11:37:07 +00001
2/*--------------------------------------------------------------------*/
3/*--- malloc/free wrappers for detecting errors and updating bits. ---*/
4/*--- mc_malloc_wrappers.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
8 This file is part of MemCheck, a heavyweight Valgrind tool for
njne2656362007-03-10 02:27:44 +00009 detecting memory errors.
njn1d0825f2006-03-27 11:37:07 +000010
sewardj9eecbbb2010-05-03 21:37:12 +000011 Copyright (C) 2000-2010 Julian Seward
njn1d0825f2006-03-27 11:37:07 +000012 jseward@acm.org
13
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 02111-1307, USA.
28
29 The GNU General Public License is contained in the file COPYING.
30*/
31
32#include "pub_tool_basics.h"
33#include "pub_tool_execontext.h"
34#include "pub_tool_hashtable.h"
35#include "pub_tool_libcbase.h"
36#include "pub_tool_libcassert.h"
37#include "pub_tool_libcprint.h"
38#include "pub_tool_mallocfree.h"
39#include "pub_tool_options.h"
40#include "pub_tool_replacemalloc.h"
41#include "pub_tool_threadstate.h"
42#include "pub_tool_tooliface.h" // Needed for mc_include.h
sewardjc740d762006-10-05 17:59:23 +000043#include "pub_tool_stacktrace.h" // For VG_(get_and_pp_StackTrace)
njn1d0825f2006-03-27 11:37:07 +000044
45#include "mc_include.h"
46
47/*------------------------------------------------------------*/
48/*--- Defns ---*/
49/*------------------------------------------------------------*/
50
/* Stats ... */
static SizeT cmalloc_n_mallocs  = 0;   /* total client allocations seen */
static SizeT cmalloc_n_frees    = 0;   /* total client frees seen */
static ULong cmalloc_bs_mallocd = 0;   /* total bytes successfully allocated */

/* For debug printing to do with mempools: what stack trace
   depth to show. */
#define MEMPOOL_DEBUG_STACKTRACE_DEPTH 16


/*------------------------------------------------------------*/
/*--- Tracking malloc'd and free'd blocks                  ---*/
/*------------------------------------------------------------*/

/* Record malloc'd blocks.  Maps client block start address to MC_Chunk. */
VgHashTable MC_(malloc_list) = NULL;

/* Memory pools: a hash table of MC_Mempools.  Search key is
   MC_Mempool::pool. */
VgHashTable MC_(mempool_list) = NULL;

/* Records blocks after freeing.  Singly-linked FIFO queue, oldest at
   the head; blocks are held here (inaccessible) for a while so that
   use-after-free accesses can still be detected. */
static MC_Chunk* freed_list_start = NULL;
static MC_Chunk* freed_list_end   = NULL;
njn1d0825f2006-03-27 11:37:07 +000075
/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time. */
static void add_to_freed_queue ( MC_Chunk* mc )
{
   const Bool show = False;   /* flip to True for free-queue tracing */

   /* Put it at the end of the freed list */
   if (freed_list_end == NULL) {
      /* Queue was empty: mc becomes both head and tail. */
      tl_assert(freed_list_start == NULL);
      freed_list_end = freed_list_start = mc;
      VG_(free_queue_volume) = (Long)mc->szB;
   } else {
      tl_assert(freed_list_end->next == NULL);
      freed_list_end->next = mc;
      freed_list_end = mc;
      VG_(free_queue_volume) += (Long)mc->szB;
      if (show)
         VG_(printf)("mc_freelist: acquire: volume now %lld\n",
                     VG_(free_queue_volume));
   }
   VG_(free_queue_length)++;
   mc->next = NULL;

   /* Release enough of the oldest blocks to bring the free queue
      volume below vg_clo_freelist_vol. */

   while (VG_(free_queue_volume) > MC_(clo_freelist_vol)) {
      MC_Chunk* mc1;

      tl_assert(freed_list_start != NULL);
      tl_assert(freed_list_end != NULL);

      /* Detach the oldest block (head of the queue). */
      mc1 = freed_list_start;
      VG_(free_queue_volume) -= (Long)mc1->szB;
      VG_(free_queue_length)--;
      if (show)
         VG_(printf)("mc_freelist: discard: volume now %lld\n",
                     VG_(free_queue_volume));
      tl_assert(VG_(free_queue_volume) >= 0);

      if (freed_list_start == freed_list_end) {
         freed_list_start = freed_list_end = NULL;
      } else {
         freed_list_start = mc1->next;
      }
      mc1->next = NULL; /* just paranoia */

      /* free MC_Chunk.  Custom-allocator blocks were not obtained via
         VG_(cli_malloc), so only release the client payload for the
         standard allocation kinds. */
      if (MC_AllocCustom != mc1->allockind)
         VG_(cli_free) ( (void*)(mc1->data) );
      VG_(free) ( mc1 );
   }
}
129
/* Return the oldest entry of the freed-blocks queue (NULL if empty),
   without removing it.  Used by the leak checker / error reporting to
   scan recently freed blocks. */
MC_Chunk* MC_(get_freed_list_head)(void)
{
   return freed_list_start;
}
134
/* Allocate its shadow chunk, put it on the appropriate list.
   'ec' is the allocation context recorded in the chunk, 'p' the client
   payload address, 'szB' the payload size, 'kind' the allocator family
   (malloc/new/new[]/custom). */
static
MC_Chunk* create_MC_Chunk ( ExeContext* ec, Addr p, SizeT szB,
                            MC_AllocKind kind)
{
   MC_Chunk* mc  = VG_(malloc)("mc.cMC.1 (a MC_Chunk)", sizeof(MC_Chunk));
   mc->data      = p;
   mc->szB       = szB;
   mc->allockind = kind;
   mc->where     = ec;

   /* Paranoia ... ensure the MC_Chunk is off-limits to the client, so
      the mc->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by VG_(malloc)
      should be noaccess as far as the client is concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mc, sizeof(MC_Chunk), NULL )) {
      VG_(tool_panic)("create_MC_Chunk: shadow area is accessible");
   }
   return mc;
}
155
156/*------------------------------------------------------------*/
157/*--- client_malloc(), etc ---*/
158/*------------------------------------------------------------*/
159
njn017d3772009-05-19 02:10:26 +0000160// XXX: should make this a proper error (bug #79311).
njn1d0825f2006-03-27 11:37:07 +0000161static Bool complain_about_silly_args(SizeT sizeB, Char* fn)
162{
163 // Cast to a signed type to catch any unexpectedly negative args. We're
164 // assuming here that the size asked for is not greater than 2^31 bytes
165 // (for 32-bit platforms) or 2^63 bytes (for 64-bit platforms).
166 if ((SSizeT)sizeB < 0) {
sewardj22faf712007-11-09 11:33:02 +0000167 if (!VG_(clo_xml))
sewardj6b523cd2009-07-15 14:49:40 +0000168 VG_(message)(Vg_UserMsg, "Warning: silly arg (%ld) to %s()\n",
sewardj22faf712007-11-09 11:33:02 +0000169 (SSizeT)sizeB, fn );
njn1d0825f2006-03-27 11:37:07 +0000170 return True;
171 }
172 return False;
173}
174
175static Bool complain_about_silly_args2(SizeT n, SizeT sizeB)
176{
177 if ((SSizeT)n < 0 || (SSizeT)sizeB < 0) {
sewardj22faf712007-11-09 11:33:02 +0000178 if (!VG_(clo_xml))
179 VG_(message)(Vg_UserMsg,
sewardj6b523cd2009-07-15 14:49:40 +0000180 "Warning: silly args (%ld,%ld) to calloc()\n",
sewardj22faf712007-11-09 11:33:02 +0000181 (SSizeT)n, (SSizeT)sizeB);
njn1d0825f2006-03-27 11:37:07 +0000182 return True;
183 }
184 return False;
185}
186
/* Allocate memory and note change in memory available.
   Central allocation path shared by all the wrappers below and by
   mempool_alloc.  If 'p' is non-zero the client already owns the
   memory (custom allocator, kind must be MC_AllocCustom) and we only
   record it; otherwise we allocate 'szB' bytes at alignment 'alignB'
   from the client arena.  The resulting chunk is registered in
   'table', and the payload's V-bits are set to defined (if zeroed)
   or undefined, tagged with the allocation context for origin
   tracking.  Returns the payload address, or NULL on failure. */
void* MC_(new_block) ( ThreadId tid,
                       Addr p, SizeT szB, SizeT alignB,
                       Bool is_zeroed, MC_AllocKind kind, VgHashTable table)
{
   ExeContext* ec;

   cmalloc_n_mallocs ++;

   // Allocate and zero if necessary
   if (p) {
      tl_assert(MC_AllocCustom == kind);
   } else {
      tl_assert(MC_AllocCustom != kind);
      p = (Addr)VG_(cli_malloc)( alignB, szB );
      if (!p) {
         return NULL;
      }
      if (is_zeroed) {
         VG_(memset)((void*)p, 0, szB);
      } else
      if (MC_(clo_malloc_fill) != -1) {
         /* --malloc-fill: poison fresh memory with a known byte. */
         tl_assert(MC_(clo_malloc_fill) >= 0x00 && MC_(clo_malloc_fill) <= 0xFF);
         VG_(memset)((void*)p, MC_(clo_malloc_fill), szB);
      }
   }

   // Only update this stat if allocation succeeded.
   cmalloc_bs_mallocd += (ULong)szB;

   ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   tl_assert(ec);

   VG_(HT_add_node)( table, create_MC_Chunk(ec, p, szB, kind) );

   if (is_zeroed)
      MC_(make_mem_defined)( p, szB );
   else {
      /* Tag undefined memory with the allocation context so later
         "use of uninitialised value" errors can report their origin. */
      UInt ecu = VG_(get_ECU_from_ExeContext)(ec);
      tl_assert(VG_(is_plausible_ECU)(ecu));
      MC_(make_mem_undefined_w_otag)( p, szB, ecu | MC_OKIND_HEAP );
   }

   return (void*)p;
}
232
233void* MC_(malloc) ( ThreadId tid, SizeT n )
234{
235 if (complain_about_silly_args(n, "malloc")) {
236 return NULL;
237 } else {
238 return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
njn1dcee092009-02-24 03:07:37 +0000239 /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
njn1d0825f2006-03-27 11:37:07 +0000240 }
241}
242
243void* MC_(__builtin_new) ( ThreadId tid, SizeT n )
244{
245 if (complain_about_silly_args(n, "__builtin_new")) {
246 return NULL;
247 } else {
248 return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
njn1dcee092009-02-24 03:07:37 +0000249 /*is_zeroed*/False, MC_AllocNew, MC_(malloc_list));
njn1d0825f2006-03-27 11:37:07 +0000250 }
251}
252
253void* MC_(__builtin_vec_new) ( ThreadId tid, SizeT n )
254{
255 if (complain_about_silly_args(n, "__builtin_vec_new")) {
256 return NULL;
257 } else {
258 return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
njn1dcee092009-02-24 03:07:37 +0000259 /*is_zeroed*/False, MC_AllocNewVec, MC_(malloc_list));
njn1d0825f2006-03-27 11:37:07 +0000260 }
261}
262
njn718d3b12006-12-16 00:54:12 +0000263void* MC_(memalign) ( ThreadId tid, SizeT alignB, SizeT n )
njn1d0825f2006-03-27 11:37:07 +0000264{
265 if (complain_about_silly_args(n, "memalign")) {
266 return NULL;
267 } else {
njn718d3b12006-12-16 00:54:12 +0000268 return MC_(new_block) ( tid, 0, n, alignB,
njn1dcee092009-02-24 03:07:37 +0000269 /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
njn1d0825f2006-03-27 11:37:07 +0000270 }
271}
272
273void* MC_(calloc) ( ThreadId tid, SizeT nmemb, SizeT size1 )
274{
275 if (complain_about_silly_args2(nmemb, size1)) {
276 return NULL;
277 } else {
278 return MC_(new_block) ( tid, 0, nmemb*size1, VG_(clo_alignment),
njn1dcee092009-02-24 03:07:37 +0000279 /*is_zeroed*/True, MC_AllocMalloc, MC_(malloc_list));
njn1d0825f2006-03-27 11:37:07 +0000280 }
281}
282
/* Retire a block: optionally scribble --free-fill junk over the
   payload, make the payload plus both redzones (of width rzB)
   inaccessible, stamp the chunk with the freeing context, and park it
   on the freed-blocks queue so use-after-free is still detectable. */
static
void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
{
   if (MC_(clo_free_fill) != -1) {
      tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
      VG_(memset)((void*)mc->data, MC_(clo_free_fill), mc->szB);
   }

   /* Note: make redzones noaccess again -- just in case user made them
      accessible with a client request... */
   MC_(make_mem_noaccess)( mc->data-rzB, mc->szB + 2*rzB );

   /* Record where freed */
   mc->where = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
   /* Put it out of harm's way for a while */
   add_to_freed_queue ( mc );
}
300
/* Common path for free()/delete/delete[]: look up and remove the
   block at 'p' from the malloc list, report an invalid free if it is
   unknown, report a mismatch if it was allocated by a different
   allocator family than 'kind', then retire it via die_and_free_mem
   with redzone width 'rzB'. */
void MC_(handle_free) ( ThreadId tid, Addr p, UInt rzB, MC_AllocKind kind )
{
   MC_Chunk* mc;

   cmalloc_n_frees++;

   mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p );
   if (mc == NULL) {
      MC_(record_free_error) ( tid, p );
   } else {
      /* check if it is a matching free() / delete / delete [] */
      if (kind != mc->allockind) {
         tl_assert(p == mc->data);
         MC_(record_freemismatch_error) ( tid, mc );
      }
      /* Freed anyway, even on mismatch. */
      die_and_free_mem ( tid, mc, rzB );
   }
}
319
/* Client free() replacement: thin wrapper over MC_(handle_free),
   expecting an MC_AllocMalloc-kind block. */
void MC_(free) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocMalloc );
}
325
/* Client operator delete replacement: expects an MC_AllocNew block;
   mismatches are reported by MC_(handle_free). */
void MC_(__builtin_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocNew);
}
331
/* Client operator delete[] replacement: expects an MC_AllocNewVec
   block; mismatches are reported by MC_(handle_free). */
void MC_(__builtin_vec_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocNewVec);
}
337
/* Client realloc() replacement.  Always moves the block (even when
   shrinking) so stale pointers to the old location keep producing
   use-after-free errors.  Shadow V-bits of the retained prefix are
   copied alongside the data; any grown tail is marked undefined with
   an origin tag.  Returns the new payload, or NULL on silly size /
   unknown block / allocation failure. */
void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
{
   MC_Chunk* mc;
   void*     p_new;
   SizeT     old_szB;

   /* realloc counts as one free plus one malloc. */
   cmalloc_n_frees ++;
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)new_szB;

   if (complain_about_silly_args(new_szB, "realloc"))
      return NULL;

   /* Remove the old block */
   mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p_old );
   if (mc == NULL) {
      MC_(record_free_error) ( tid, (Addr)p_old );
      /* We return to the program regardless. */
      return NULL;
   }

   /* check if its a matching free() / delete / delete [] */
   if (MC_AllocMalloc != mc->allockind) {
      /* can not realloc a range that was allocated with new or new [] */
      tl_assert((Addr)p_old == mc->data);
      MC_(record_freemismatch_error) ( tid, mc );
      /* but keep going anyway */
   }

   old_szB = mc->szB;

   /* In all cases, even when the new size is smaller or unchanged, we
      reallocate and copy the contents, and make the old block
      inaccessible.  This is so as to guarantee to catch all cases of
      accesses via the old address after reallocation, regardless of
      the change in size.  (Of course the ability to detect accesses
      to the old block also depends on the size of the freed blocks
      queue). */

   if (new_szB <= old_szB) {
      /* new size is smaller or the same */
      Addr a_new;
      /* Get new memory */
      a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

      if (a_new) {
         ExeContext* ec;

         ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
         tl_assert(ec);

         /* Retained part is copied, red zones set as normal */
         MC_(make_mem_noaccess)( a_new-MC_MALLOC_REDZONE_SZB,
                                 MC_MALLOC_REDZONE_SZB );
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
         MC_(make_mem_noaccess) ( a_new+new_szB, MC_MALLOC_REDZONE_SZB );

         /* Copy from old to new */
         VG_(memcpy)((void*)a_new, p_old, new_szB);

         /* Possibly fill freed area with specified junk. */
         if (MC_(clo_free_fill) != -1) {
            tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
            VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
         }

         /* Free old memory */
         /* Nb: we have to allocate a new MC_Chunk for the new memory rather
            than recycling the old one, so that any erroneous accesses to the
            old memory are reported. */
         die_and_free_mem ( tid, mc, MC_MALLOC_REDZONE_SZB );

         // Allocate a new chunk.
         mc = create_MC_Chunk( ec, a_new, new_szB, MC_AllocMalloc );
      }
      /* NOTE(review): if VG_(cli_malloc) failed, 'mc' is unchanged and is
         re-inserted below, so the old block stays live -- presumably
         intentional (realloc failure must not free the old block). */

      p_new = (void*)a_new;

   } else {
      /* new size is bigger */
      Addr a_new;
      tl_assert(old_szB < new_szB);
      /* Get new memory */
      a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

      if (a_new) {
         UInt        ecu;
         ExeContext* ec;

         ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
         tl_assert(ec);
         ecu = VG_(get_ECU_from_ExeContext)(ec);
         tl_assert(VG_(is_plausible_ECU)(ecu));

         /* First half kept and copied, second half new, red zones as normal */
         MC_(make_mem_noaccess)( a_new-MC_MALLOC_REDZONE_SZB,
                                 MC_MALLOC_REDZONE_SZB );
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, mc->szB );
         MC_(make_mem_undefined_w_otag)( a_new+mc->szB, new_szB-mc->szB,
                                                        ecu | MC_OKIND_HEAP );
         MC_(make_mem_noaccess) ( a_new+new_szB, MC_MALLOC_REDZONE_SZB );

         /* Possibly fill new area with specified junk */
         if (MC_(clo_malloc_fill) != -1) {
            tl_assert(MC_(clo_malloc_fill) >= 0x00
                      && MC_(clo_malloc_fill) <= 0xFF);
            VG_(memset)((void*)(a_new+old_szB), MC_(clo_malloc_fill),
                                                new_szB-old_szB);
         }

         /* Copy from old to new */
         VG_(memcpy)((void*)a_new, p_old, mc->szB);

         /* Possibly fill freed area with specified junk. */
         if (MC_(clo_free_fill) != -1) {
            tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
            VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
         }

         /* Free old memory */
         /* Nb: we have to allocate a new MC_Chunk for the new memory rather
            than recycling the old one, so that any erroneous accesses to the
            old memory are reported. */
         die_and_free_mem ( tid, mc, MC_MALLOC_REDZONE_SZB );

         // Allocate a new chunk.
         mc = create_MC_Chunk( ec, a_new, new_szB, MC_AllocMalloc );
      }

      p_new = (void*)a_new;
   }

   // Now insert the new mc (with a possibly new 'data' field) into
   // malloc_list.  If this realloc() did not increase the memory size, we
   // will have removed and then re-added mc unnecessarily.  But that's ok
   // because shrinking a block with realloc() is (presumably) much rarer
   // than growing it, and this way simplifies the growing case.
   VG_(HT_add_node)( MC_(malloc_list), mc );

   return p_new;
}
479
/* Client malloc_usable_size() replacement: report the recorded payload
   size of the block at 'p', or 0 for an unknown pointer. */
SizeT MC_(malloc_usable_size) ( ThreadId tid, void* p )
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );

   // There may be slop, but pretend there isn't because only the asked-for
   // area will be marked as addressable.
   return ( mc ? mc->szB : 0 );
}
488
njn017d3772009-05-19 02:10:26 +0000489
sewardj62b91042011-01-23 20:45:53 +0000490/*------------------------------------------------------------*/
491/*--- Memory pool stuff. ---*/
492/*------------------------------------------------------------*/
njn1d0825f2006-03-27 11:37:07 +0000493
sewardj7e30be42011-01-27 23:56:36 +0000494/* Set to 1 for intensive sanity checking. Is very expensive though
495 and should not be used in production scenarios. See #255966. */
496#define MP_DETAILED_SANITY_CHECKS 0
497
498static void check_mempool_sane(MC_Mempool* mp); /*forward*/
499
500
/* Handle VALGRIND_CREATE_MEMPOOL: register a new memory pool anchored
   at 'pool', with redzone width 'rzB' and zeroed-allocation policy
   'is_zeroed'.  Panics on duplicate registration. */
void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "create_mempool(0x%lx, %d, %d)\n",
                               pool, rzB, is_zeroed);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp != NULL) {
     VG_(tool_panic)("MC_(create_mempool): duplicate pool creation");
   }

   mp = VG_(malloc)("mc.cm.1", sizeof(MC_Mempool));
   mp->pool       = pool;
   mp->rzB        = rzB;
   mp->is_zeroed  = is_zeroed;
   mp->chunks     = VG_(HT_construct)( "MC_(create_mempool)" );
   check_mempool_sane(mp);

   /* Paranoia ... ensure this area is off-limits to the client, so
      the mp->data field isn't visible to the leak checker.  If memory
      management is working correctly, anything pointer returned by
      VG_(malloc) should be noaccess as far as the client is
      concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mp, sizeof(MC_Mempool), NULL )) {
      VG_(tool_panic)("MC_(create_mempool): shadow area is accessible");
   }

   VG_(HT_add_node)( MC_(mempool_list), mp );
}
535
/* Handle VALGRIND_DESTROY_MEMPOOL: unregister pool 'pool', mark every
   chunk it still holds (payload + redzones) inaccessible, and free the
   pool's bookkeeping.  An unknown pool yields an illegal-mempool
   error, not a panic. */
void MC_(destroy_mempool)(Addr pool)
{
   MC_Chunk*   mc;
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "destroy_mempool(0x%lx)\n", pool);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)pool );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, pool );
      return;
   }
   check_mempool_sane(mp);

   // Clean up the chunks, one by one
   VG_(HT_ResetIter)(mp->chunks);
   while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
      /* Note: make redzones noaccess again -- just in case user made them
         accessible with a client request... */
      MC_(make_mem_noaccess)(mc->data-mp->rzB, mc->szB + 2*mp->rzB );
   }
   // Destroy the chunk table
   VG_(HT_destruct)(mp->chunks);

   VG_(free)(mp);
}
568
sewardjc740d762006-10-05 17:59:23 +0000569static Int
570mp_compar(void* n1, void* n2)
571{
572 MC_Chunk* mc1 = *(MC_Chunk**)n1;
573 MC_Chunk* mc2 = *(MC_Chunk**)n2;
sewardjb8b79ad2008-03-03 01:35:41 +0000574 if (mc1->data < mc2->data) return -1;
575 if (mc1->data > mc2->data) return 1;
576 return 0;
sewardjc740d762006-10-05 17:59:23 +0000577}
578
/* Sanity-check one mempool: snapshot its chunks into an array, sort
   them by start address, and verify they are ordered and mutually
   non-overlapping.  On failure, dump all chunks with their allocation
   contexts.  At verbosity > 1 also periodically (every ~10000 calls)
   print pool/chunk totals.  Expensive -- see MP_DETAILED_SANITY_CHECKS. */
static void
check_mempool_sane(MC_Mempool* mp)
{
   UInt n_chunks, i, bad = 0;
   static UInt tick = 0;   /* throttles the periodic stats printout */

   MC_Chunk **chunks = (MC_Chunk**) VG_(HT_to_array)( mp->chunks, &n_chunks );
   if (!chunks)
      return;

   if (VG_(clo_verbosity) > 1) {
      if (tick++ >= 10000)
         {
            UInt total_pools = 0, total_chunks = 0;
            MC_Mempool* mp2;

            /* Walk every pool and count all live chunks. */
            VG_(HT_ResetIter)(MC_(mempool_list));
            while ( (mp2 = VG_(HT_Next)(MC_(mempool_list))) ) {
               total_pools++;
               VG_(HT_ResetIter)(mp2->chunks);
               while (VG_(HT_Next)(mp2->chunks)) {
                  total_chunks++;
               }
            }

            VG_(message)(Vg_UserMsg,
                         "Total mempools active: %d pools, %d chunks\n",
                         total_pools, total_chunks);
            tick = 0;
         }
   }


   VG_(ssort)((void*)chunks, n_chunks, sizeof(VgHashNode*), mp_compar);

   /* Sanity check; assert that the blocks are now in order */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data > chunks[i+1]->data) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d is out of order "
                      "wrt. its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   /* Sanity check -- make sure they don't overlap */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data + chunks[i]->szB > chunks[i+1]->data ) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d overlaps with its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   if (bad) {
         VG_(message)(Vg_UserMsg,
                "Bad mempool (%d chunks), dumping chunks for inspection:\n",
                n_chunks);
         for (i = 0; i < n_chunks; ++i) {
            VG_(message)(Vg_UserMsg,
                         "Mempool chunk %d / %d: %ld bytes "
                         "[%lx,%lx), allocated:\n",
                         i+1,
                         n_chunks,
                         chunks[i]->szB + 0UL,
                         chunks[i]->data,
                         chunks[i]->data + chunks[i]->szB);

            VG_(pp_ExeContext)(chunks[i]->where);
         }
   }
   VG_(free)(chunks);
}
654
/* Handle VALGRIND_MEMPOOL_ALLOC: record an allocation of 'szB' bytes
   at 'addr' from pool 'pool'.  The memory itself was allocated by the
   client's custom allocator; MC_(new_block) only registers it (kind
   MC_AllocCustom) in the pool's chunk table and sets its V-bits per
   the pool's is_zeroed policy. */
void MC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %ld)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup) ( MC_(mempool_list), (UWord)pool );
   if (mp == NULL) {
      MC_(record_illegal_mempool_error) ( tid, pool );
   } else {
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
      MC_(new_block)(tid, addr, szB, /*ignored*/0, mp->is_zeroed,
                     MC_AllocCustom, mp->chunks);
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   }
}
675
/* Handle VALGRIND_MEMPOOL_FREE: remove the chunk at 'addr' from pool
   'pool' and retire it (noaccess + freed queue) via die_and_free_mem.
   Unknown pool -> illegal-mempool error; unknown addr -> free error. */
void MC_(mempool_free)(Addr pool, Addr addr)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_free(0x%lx, 0x%lx)\n", pool, addr);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   mc = VG_(HT_remove)(mp->chunks, (UWord)addr);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addr);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
                   "mempool_free(0x%lx, 0x%lx) freed chunk of %ld bytes\n",
                   pool, addr, mc->szB + 0UL);
   }

   die_and_free_mem ( tid, mc, mp->rzB );
   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
}
709
sewardj2c1c9df2006-07-28 00:06:37 +0000710
njn718d3b12006-12-16 00:54:12 +0000711void MC_(mempool_trim)(Addr pool, Addr addr, SizeT szB)
sewardj2c1c9df2006-07-28 00:06:37 +0000712{
713 MC_Mempool* mp;
714 MC_Chunk* mc;
715 ThreadId tid = VG_(get_running_tid)();
716 UInt n_shadows, i;
717 VgHashNode** chunks;
718
sewardjc740d762006-10-05 17:59:23 +0000719 if (VG_(clo_verbosity) > 2) {
sewardj6b523cd2009-07-15 14:49:40 +0000720 VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %ld)\n",
721 pool, addr, szB);
sewardjc740d762006-10-05 17:59:23 +0000722 VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
723 }
724
sewardj2c1c9df2006-07-28 00:06:37 +0000725 mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
726 if (mp == NULL) {
727 MC_(record_illegal_mempool_error)(tid, pool);
728 return;
729 }
730
sewardjc740d762006-10-05 17:59:23 +0000731 check_mempool_sane(mp);
sewardj2c1c9df2006-07-28 00:06:37 +0000732 chunks = VG_(HT_to_array) ( mp->chunks, &n_shadows );
733 if (n_shadows == 0) {
734 tl_assert(chunks == NULL);
735 return;
736 }
737
738 tl_assert(chunks != NULL);
739 for (i = 0; i < n_shadows; ++i) {
sewardj8aeeaa92006-08-16 17:51:28 +0000740
sewardjc740d762006-10-05 17:59:23 +0000741 Addr lo, hi, min, max;
sewardj8aeeaa92006-08-16 17:51:28 +0000742
sewardj2c1c9df2006-07-28 00:06:37 +0000743 mc = (MC_Chunk*) chunks[i];
744
sewardj8aeeaa92006-08-16 17:51:28 +0000745 lo = mc->data;
njn718d3b12006-12-16 00:54:12 +0000746 hi = mc->szB == 0 ? mc->data : mc->data + mc->szB - 1;
sewardj2c1c9df2006-07-28 00:06:37 +0000747
njn718d3b12006-12-16 00:54:12 +0000748#define EXTENT_CONTAINS(x) ((addr <= (x)) && ((x) < addr + szB))
sewardj2c1c9df2006-07-28 00:06:37 +0000749
sewardj8aeeaa92006-08-16 17:51:28 +0000750 if (EXTENT_CONTAINS(lo) && EXTENT_CONTAINS(hi)) {
sewardj2c1c9df2006-07-28 00:06:37 +0000751
752 /* The current chunk is entirely within the trim extent: keep
753 it. */
754
755 continue;
756
sewardj8aeeaa92006-08-16 17:51:28 +0000757 } else if ( (! EXTENT_CONTAINS(lo)) &&
758 (! EXTENT_CONTAINS(hi)) ) {
sewardj2c1c9df2006-07-28 00:06:37 +0000759
760 /* The current chunk is entirely outside the trim extent:
761 delete it. */
762
763 if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
764 MC_(record_free_error)(tid, (Addr)mc->data);
765 VG_(free)(chunks);
sewardj7e30be42011-01-27 23:56:36 +0000766 if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
sewardj2c1c9df2006-07-28 00:06:37 +0000767 return;
768 }
769 die_and_free_mem ( tid, mc, mp->rzB );
770
771 } else {
772
773 /* The current chunk intersects the trim extent: remove,
774 trim, and reinsert it. */
775
sewardj8aeeaa92006-08-16 17:51:28 +0000776 tl_assert(EXTENT_CONTAINS(lo) ||
777 EXTENT_CONTAINS(hi));
sewardj2c1c9df2006-07-28 00:06:37 +0000778 if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
779 MC_(record_free_error)(tid, (Addr)mc->data);
780 VG_(free)(chunks);
sewardj7e30be42011-01-27 23:56:36 +0000781 if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
sewardj2c1c9df2006-07-28 00:06:37 +0000782 return;
783 }
784
sewardjc740d762006-10-05 17:59:23 +0000785 if (mc->data < addr) {
786 min = mc->data;
787 lo = addr;
788 } else {
789 min = addr;
790 lo = mc->data;
791 }
sewardj2c1c9df2006-07-28 00:06:37 +0000792
njn718d3b12006-12-16 00:54:12 +0000793 if (mc->data + szB > addr + szB) {
794 max = mc->data + szB;
795 hi = addr + szB;
sewardjc740d762006-10-05 17:59:23 +0000796 } else {
njn718d3b12006-12-16 00:54:12 +0000797 max = addr + szB;
798 hi = mc->data + szB;
sewardjc740d762006-10-05 17:59:23 +0000799 }
800
801 tl_assert(min <= lo);
sewardj2c1c9df2006-07-28 00:06:37 +0000802 tl_assert(lo < hi);
sewardjc740d762006-10-05 17:59:23 +0000803 tl_assert(hi <= max);
804
805 if (min < lo && !EXTENT_CONTAINS(min)) {
806 MC_(make_mem_noaccess)( min, lo - min);
807 }
808
809 if (hi < max && !EXTENT_CONTAINS(max)) {
810 MC_(make_mem_noaccess)( hi, max - hi );
811 }
812
sewardj2c1c9df2006-07-28 00:06:37 +0000813 mc->data = lo;
njn718d3b12006-12-16 00:54:12 +0000814 mc->szB = (UInt) (hi - lo);
sewardj2c1c9df2006-07-28 00:06:37 +0000815 VG_(HT_add_node)( mp->chunks, mc );
816 }
817
818#undef EXTENT_CONTAINS
819
820 }
sewardjc740d762006-10-05 17:59:23 +0000821 check_mempool_sane(mp);
sewardj2c1c9df2006-07-28 00:06:37 +0000822 VG_(free)(chunks);
823}
824
sewardjc740d762006-10-05 17:59:23 +0000825void MC_(move_mempool)(Addr poolA, Addr poolB)
826{
827 MC_Mempool* mp;
828
829 if (VG_(clo_verbosity) > 2) {
sewardj6b523cd2009-07-15 14:49:40 +0000830 VG_(message)(Vg_UserMsg, "move_mempool(0x%lx, 0x%lx)\n", poolA, poolB);
sewardjc740d762006-10-05 17:59:23 +0000831 VG_(get_and_pp_StackTrace)
832 (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
833 }
834
835 mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)poolA );
836
837 if (mp == NULL) {
838 ThreadId tid = VG_(get_running_tid)();
839 MC_(record_illegal_mempool_error) ( tid, poolA );
840 return;
841 }
842
843 mp->pool = poolB;
844 VG_(HT_add_node)( MC_(mempool_list), mp );
845}
846
njn718d3b12006-12-16 00:54:12 +0000847void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT szB)
sewardjc740d762006-10-05 17:59:23 +0000848{
849 MC_Mempool* mp;
850 MC_Chunk* mc;
851 ThreadId tid = VG_(get_running_tid)();
852
853 if (VG_(clo_verbosity) > 2) {
sewardj6b523cd2009-07-15 14:49:40 +0000854 VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %ld)\n",
njn718d3b12006-12-16 00:54:12 +0000855 pool, addrA, addrB, szB);
sewardjc740d762006-10-05 17:59:23 +0000856 VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
857 }
858
859 mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
860 if (mp == NULL) {
861 MC_(record_illegal_mempool_error)(tid, pool);
862 return;
863 }
864
865 check_mempool_sane(mp);
866
867 mc = VG_(HT_remove)(mp->chunks, (UWord)addrA);
868 if (mc == NULL) {
869 MC_(record_free_error)(tid, (Addr)addrA);
870 return;
871 }
872
873 mc->data = addrB;
njn718d3b12006-12-16 00:54:12 +0000874 mc->szB = szB;
sewardjc740d762006-10-05 17:59:23 +0000875 VG_(HT_add_node)( mp->chunks, mc );
876
877 check_mempool_sane(mp);
878}
879
880Bool MC_(mempool_exists)(Addr pool)
881{
882 MC_Mempool* mp;
883
884 mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
885 if (mp == NULL) {
886 return False;
887 }
888 return True;
889}
890
891
njn1d0825f2006-03-27 11:37:07 +0000892/*------------------------------------------------------------*/
893/*--- Statistics printing ---*/
894/*------------------------------------------------------------*/
895
896void MC_(print_malloc_stats) ( void )
897{
898 MC_Chunk* mc;
899 SizeT nblocks = 0;
sewardjea9c15e2007-03-14 11:57:37 +0000900 ULong nbytes = 0;
njn1d0825f2006-03-27 11:37:07 +0000901
902 if (VG_(clo_verbosity) == 0)
903 return;
904 if (VG_(clo_xml))
905 return;
906
907 /* Count memory still in use. */
908 VG_(HT_ResetIter)(MC_(malloc_list));
909 while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
910 nblocks++;
sewardjea9c15e2007-03-14 11:57:37 +0000911 nbytes += (ULong)mc->szB;
njn1d0825f2006-03-27 11:37:07 +0000912 }
913
sewardj2d9e8742009-08-07 15:46:56 +0000914 VG_(umsg)(
915 "HEAP SUMMARY:\n"
njnb6267bd2009-08-12 00:14:16 +0000916 " in use at exit: %'llu bytes in %'lu blocks\n"
917 " total heap usage: %'lu allocs, %'lu frees, %'llu bytes allocated\n"
918 "\n",
919 nbytes, nblocks,
sewardj6b523cd2009-07-15 14:49:40 +0000920 cmalloc_n_mallocs,
921 cmalloc_n_frees, cmalloc_bs_mallocd
922 );
njn1d0825f2006-03-27 11:37:07 +0000923}
924
925/*--------------------------------------------------------------------*/
926/*--- end ---*/
927/*--------------------------------------------------------------------*/