
/*--------------------------------------------------------------------*/
/*--- An implementation of malloc/free which doesn't use sbrk.     ---*/
/*---                                                 vg_malloc2.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, an extensible x86 protected-mode
   emulator for monitoring program execution on x86-Unixes.

   Copyright (C) 2000-2004 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/


#include "core.h"

//#define DEBUG_MALLOC      // turn on heavyweight debugging machinery
//#define VERBOSE_MALLOC    // make verbose, esp. in debugging machinery

/*------------------------------------------------------------*/
/*--- Main types                                           ---*/
/*------------------------------------------------------------*/

#define VG_N_MALLOC_LISTS 16    // do not change this

// On 64-bit systems size_t is 64 bits, so payloads bigger than this are
// possible.  We can worry about that when it happens...
#define MAX_PSZB 0x7ffffff0

typedef UChar UByte;

/* Block layout:

     this block total szB     (sizeof(Int) bytes)
     freelist previous ptr    (sizeof(void*) bytes)
     red zone bytes           (depends on .rz_szB field of Arena)
     (payload bytes)
     red zone bytes           (depends on .rz_szB field of Arena)
     freelist next ptr        (sizeof(void*) bytes)
     this block total szB     (sizeof(Int) bytes)

   Total size in bytes (bszB) and payload size in bytes (pszB)
   are related by:

      bszB == pszB + 2*sizeof(Int) + 2*sizeof(void*) + 2*a->rz_szB

   Furthermore, both size fields in the block are negative if it is
   not in use, and positive if it is in use.  A block size of zero
   is not possible, because a block always has at least two Ints and
   two pointers of overhead.

   Nb: All Block payloads must be VG_MIN_MALLOC_SZB-aligned.  This is
   achieved by ensuring that Superblocks are VG_MIN_MALLOC_SZB-aligned
   (see newSuperblock() for how), and that the lengths of the following
   things are a multiple of VG_MIN_MALLOC_SZB:
   - Superblock admin section lengths (due to elastic padding)
   - Block admin section (low and high) lengths (due to elastic redzones)
   - Block payload lengths (due to req_pszB rounding up)
*/
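
// A worked instance of the size relation above (a sketch, not normative:
// it assumes a 32-bit machine where sizeof(Int) == sizeof(void*) == 4,
// and an arena with rz_szB == 4):
//
//    bszB == pszB + 2*4 + 2*4 + 2*4 == pszB + 24
//
// so, under those assumptions, a 40-byte payload occupies a 64-byte block.
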
typedef
   struct {
      // No fields are actually used in this struct, because a Block has
      // loads of variable sized fields and so can't be accessed
      // meaningfully with normal fields.  So we use access functions all
      // the time.  This struct gives us a type to use, though.  Also, we
      // make sizeof(Block) 1 byte so that we can do arithmetic with the
      // Block* type in increments of 1!
      UByte dummy;
   }
   Block;

// A superblock.  'padding' is never used, it just ensures that if the
// entire Superblock is aligned to VG_MIN_MALLOC_SZB, then payload_bytes[]
// will be too.  It can add small amounts of padding unnecessarily -- eg.
// 8-bytes on 32-bit machines with an 8-byte VG_MIN_MALLOC_SZB -- because
// it's too hard to make a constant expression that works perfectly in all
// cases.
// payload_bytes[] is made a single big Block when the Superblock is
// created, and then can be split and the splittings remerged, but Blocks
// always cover its entire length -- there's never any unused bytes at the
// end, for example.
typedef
   struct _Superblock {
      struct _Superblock* next;
      Int   n_payload_bytes;
      UByte padding[ VG_MIN_MALLOC_SZB -
                        ((sizeof(void*) + sizeof(Int)) % VG_MIN_MALLOC_SZB) ];
      UByte payload_bytes[0];
   }
   Superblock;
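
// For instance (again assuming 32-bit sizes and VG_MIN_MALLOC_SZB == 8):
// the 'next' and 'n_payload_bytes' fields total 8 bytes, so the array
// bound above evaluates to 8 - (8 % 8) == 8 -- exactly the "unnecessary"
// padding the comment mentions -- and payload_bytes[] still starts on an
// 8-byte boundary.
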

// An arena.  'freelist' is a circular, doubly-linked list.  'rz_szB' is
// elastic, in that it can be bigger than asked-for to ensure alignment.
typedef
   struct {
      Char*       name;
      Bool        clientmem;        // Allocates in the client address space?
      Int         rz_szB;           // Red zone size in bytes
      Int         min_sblock_szB;   // Minimum superblock size in bytes
      Block*      freelist[VG_N_MALLOC_LISTS];
      Superblock* sblocks;
      // Stats only.
      UInt bytes_on_loan;
      UInt bytes_mmaped;
      UInt bytes_on_loan_max;
   }
   Arena;


/*------------------------------------------------------------*/
/*--- Low-level functions for working with Blocks.         ---*/
/*------------------------------------------------------------*/

// Mark a bszB as in-use or free, respectively.
static __inline__
Int mk_inuse_bszB ( Int bszB )
{
   vg_assert(bszB != 0);
   return (bszB < 0) ? -bszB : bszB;
}
static __inline__
Int mk_free_bszB ( Int bszB )
{
   vg_assert(bszB != 0);
   return (bszB < 0) ? bszB : -bszB;
}

// Remove the in-use/not-in-use attribute from a bszB, leaving just
// the size.
static __inline__
Int mk_plain_bszB ( Int bszB )
{
   vg_assert(bszB != 0);
   return (bszB < 0) ? -bszB : bszB;
}

// Does this bszB have the in-use attribute?
static __inline__
Bool is_inuse_bszB ( Int bszB )
{
   vg_assert(bszB != 0);
   return (bszB < 0) ? False : True;
}
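
// Sign-encoding round trip, for illustration (hypothetical values):
//    mk_free_bszB(64)   == -64    // freed: stored negative
//    is_inuse_bszB(-64) == False
//    mk_plain_bszB(-64) == 64     // strip the attribute, keep the size
//    mk_inuse_bszB(-64) == 64     // re-allocating flips it positive
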

// Set and get the lower size field of a block.
static __inline__
void set_bszB_lo ( Block* b, Int bszB )
{
   *(Int*)&b[0] = bszB;
}
static __inline__
Int get_bszB_lo ( Block* b )
{
   return *(Int*)&b[0];
}

// Get the address of the last byte in a block.
static __inline__
UByte* last_byte ( Block* b )
{
   UByte* b2 = (UByte*)b;
   return &b2[mk_plain_bszB(get_bszB_lo(b)) - 1];
}

// Set and get the upper size field of a block.
static __inline__
void set_bszB_hi ( Block* b, Int bszB )
{
   UByte* b2 = (UByte*)b;
   UByte* lb = last_byte(b);
   vg_assert(lb == &b2[mk_plain_bszB(bszB) - 1]);
   *(Int*)&lb[-sizeof(Int) + 1] = bszB;
}
static __inline__
Int get_bszB_hi ( Block* b )
{
   UByte* lb = last_byte(b);
   return *(Int*)&lb[-sizeof(Int) + 1];
}


// Given the addr of a block, return the addr of its payload.
static __inline__
UByte* get_block_payload ( Arena* a, Block* b )
{
   UByte* b2 = (UByte*)b;
   return & b2[sizeof(Int) + sizeof(void*) + a->rz_szB];
}
// Given the addr of a block's payload, return the addr of the block itself.
static __inline__
Block* get_payload_block ( Arena* a, UByte* payload )
{
   return (Block*)&payload[-sizeof(Int) - sizeof(void*) - a->rz_szB];
}


// Set and get the next and previous link fields of a block.
static __inline__
void set_prev_b ( Block* b, Block* prev_p )
{
   UByte* b2 = (UByte*)b;
   *(Block**)&b2[sizeof(Int)] = prev_p;
}
static __inline__
void set_next_b ( Block* b, Block* next_p )
{
   UByte* lb = last_byte(b);
   *(Block**)&lb[-sizeof(Int) - sizeof(void*) + 1] = next_p;
}
static __inline__
Block* get_prev_b ( Block* b )
{
   UByte* b2 = (UByte*)b;
   return *(Block**)&b2[sizeof(Int)];
}
static __inline__
Block* get_next_b ( Block* b )
{
   UByte* lb = last_byte(b);
   return *(Block**)&lb[-sizeof(Int) - sizeof(void*) + 1];
}


// Get the block immediately preceding this one in the Superblock.
static __inline__
Block* get_predecessor_block ( Block* b )
{
   UByte* b2 = (UByte*)b;
   Int bszB = mk_plain_bszB( (*(Int*)&b2[-sizeof(Int)]) );
   return (Block*)&b2[-bszB];
}

// Read and write the lower and upper red-zone bytes of a block.
static __inline__
void set_rz_lo_byte ( Arena* a, Block* b, Int rz_byteno, UByte v )
{
   UByte* b2 = (UByte*)b;
   b2[sizeof(Int) + sizeof(void*) + rz_byteno] = v;
}
static __inline__
void set_rz_hi_byte ( Arena* a, Block* b, Int rz_byteno, UByte v )
{
   UByte* lb = last_byte(b);
   lb[-sizeof(Int) - sizeof(void*) - rz_byteno] = v;
}
static __inline__
UByte get_rz_lo_byte ( Arena* a, Block* b, Int rz_byteno )
{
   UByte* b2 = (UByte*)b;
   return b2[sizeof(Int) + sizeof(void*) + rz_byteno];
}
static __inline__
UByte get_rz_hi_byte ( Arena* a, Block* b, Int rz_byteno )
{
   UByte* lb = last_byte(b);
   return lb[-sizeof(Int) - sizeof(void*) - rz_byteno];
}


/* Return the lower, upper and total overhead in bytes for a block.
   These are determined purely by which arena the block lives in. */
static __inline__
Int overhead_szB_lo ( Arena* a )
{
   return sizeof(Int) + sizeof(void*) + a->rz_szB;
}
static __inline__
Int overhead_szB_hi ( Arena* a )
{
   return sizeof(void*) + sizeof(Int) + a->rz_szB;
}
static __inline__
Int overhead_szB ( Arena* a )
{
   return overhead_szB_lo(a) + overhead_szB_hi(a);
}
// Return the minimum bszB for a block in this arena.  Since blocks can
// have zero-length payloads, this is just the size of the admin bytes.
static __inline__
Int min_useful_bszB ( Arena* a )
{
   return overhead_szB(a);
}

// Convert payload size <--> block size (both in bytes).
static __inline__
Int pszB_to_bszB ( Arena* a, Int pszB )
{
   vg_assert(pszB >= 0);
   return pszB + overhead_szB(a);
}
static __inline__
Int bszB_to_pszB ( Arena* a, Int bszB )
{
   Int pszB = bszB - overhead_szB(a);
   vg_assert(pszB >= 0);
   return pszB;
}


/*------------------------------------------------------------*/
/*--- Arena management                                     ---*/
/*------------------------------------------------------------*/

#define CORE_ARENA_MIN_SZB 1048576

// The arena structures themselves.
static Arena vg_arena[VG_N_ARENAS];

// Functions external to this module identify arenas using ArenaIds,
// not Arena*s.  This fn converts the former to the latter.
static Arena* arenaId_to_ArenaP ( ArenaId arena )
{
   vg_assert(arena >= 0 && arena < VG_N_ARENAS);
   return & vg_arena[arena];
}

// Initialise an arena.  rz_szB is the minimum redzone size;  it might be
// made bigger to ensure that VG_MIN_MALLOC_ALIGNMENT is observed.
static
void arena_init ( ArenaId aid, Char* name, Int rz_szB, Int min_sblock_szB )
{
   Int    i;
   Arena* a = arenaId_to_ArenaP(aid);

   vg_assert(rz_szB >= 0);
   vg_assert((min_sblock_szB % VKI_PAGE_SIZE) == 0);
   a->name      = name;
   a->clientmem = ( VG_AR_CLIENT == aid ? True : False );

   // The size of the low and high admin sections in a block must be a
   // multiple of VG_MIN_MALLOC_ALIGNMENT.  So we round up the asked-for
   // redzone size if necessary to achieve this.
   a->rz_szB = rz_szB;
   while (0 != overhead_szB_lo(a) % VG_MIN_MALLOC_SZB) a->rz_szB++;
   vg_assert(overhead_szB_lo(a) == overhead_szB_hi(a));

   a->min_sblock_szB = min_sblock_szB;
   for (i = 0; i < VG_N_MALLOC_LISTS; i++) a->freelist[i] = NULL;
   a->sblocks           = NULL;
   a->bytes_on_loan     = 0;
   a->bytes_mmaped      = 0;
   a->bytes_on_loan_max = 0;
}

/* Print vital stats for an arena. */
void VG_(print_all_arena_stats) ( void )
{
   Int i;
   for (i = 0; i < VG_N_ARENAS; i++) {
      Arena* a = arenaId_to_ArenaP(i);
      VG_(message)(Vg_DebugMsg,
         "AR %8s: %8d mmap'd, %8d/%8d max/curr",
         a->name, a->bytes_mmaped, a->bytes_on_loan_max, a->bytes_on_loan
      );
   }
}

/* This library is self-initialising, as that makes it more self-contained
   and less coupled with the outside world.  Hence VG_(arena_malloc)() and
   VG_(arena_free)() below always call ensure_mm_init() to ensure things
   are correctly initialised. */
static
void ensure_mm_init ( void )
{
   static Int  client_rz_szB;
   static Bool init_done = False;

   if (init_done) {
      // Make sure the client arena's redzone size never changes.  Could
      // happen if VG_(arena_malloc) was called too early, ie. before the
      // tool was loaded.
      vg_assert(client_rz_szB == VG_(vg_malloc_redzone_szB));
      return;
   }

   /* No particular reason for this figure, it's just smallish */
   sk_assert(VG_(vg_malloc_redzone_szB) < 128);
   sk_assert(VG_(vg_malloc_redzone_szB) >= 0);
   client_rz_szB = VG_(vg_malloc_redzone_szB);

   /* Use checked red zones (of various sizes) for our internal stuff,
      and an unchecked zone of arbitrary size for the client.  Of
      course the client's red zone can be checked by the tool, eg.
      by using addressability maps, but not by the mechanism implemented
      here, which merely checks at the time of freeing that the red
      zone bytes are unchanged.

      Nb: redzone sizes are *minimums*;  they could be made bigger to
      ensure alignment.  Eg. on 32-bit machines, 4 becomes 8, and 12
      becomes 16;  but on 64-bit machines 4 stays as 4, and 12 stays as
      12 --- the extra 4 bytes in both are accounted for by the larger
      prev/next ptr.
   */
   arena_init ( VG_AR_CORE,      "core",     4,              CORE_ARENA_MIN_SZB );
   arena_init ( VG_AR_TOOL,      "tool",     4,              1048576 );
   arena_init ( VG_AR_SYMTAB,    "symtab",   4,              1048576 );
   arena_init ( VG_AR_JITTER,    "JITter",   4,              32768 );
   arena_init ( VG_AR_CLIENT,    "client",   client_rz_szB,  1048576 );
   arena_init ( VG_AR_DEMANGLE,  "demangle", 12/*paranoid*/, 65536 );
   arena_init ( VG_AR_EXECTXT,   "exectxt",  4,              65536 );
   arena_init ( VG_AR_ERRORS,    "errors",   4,              65536 );
   arena_init ( VG_AR_TRANSIENT, "transien", 4,              65536 );

   init_done = True;
#  ifdef DEBUG_MALLOC
   VG_(sanity_check_malloc_all)();
#  endif
}


/*------------------------------------------------------------*/
/*--- Superblock management                                ---*/
/*------------------------------------------------------------*/

// Align ptr p upwards to an align-sized boundary.
static
void* align_upwards ( void* p, Int align )
{
   Addr a = (Addr)p;
   if ((a % align) == 0) return (void*)a;
   return (void*)(a - (a % align) + align);
}
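
// Example (illustrative addresses): align_upwards((void*)0x5003, 8) returns
// (void*)0x5008, while an already-aligned 0x5000 is returned unchanged.
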

// If not enough memory available, either aborts (for non-client memory)
// or returns 0 (for client memory).
static
Superblock* newSuperblock ( Arena* a, Int cszB )
{
   // The extra VG_MIN_MALLOC_SZB bytes are for possible alignment up.
   static UByte bootstrap_superblock[CORE_ARENA_MIN_SZB+VG_MIN_MALLOC_SZB];
   static Bool  called_before = False;
   Superblock* sb;

   // Take into account admin bytes in the Superblock.
   cszB += sizeof(Superblock);

   if (cszB < a->min_sblock_szB) cszB = a->min_sblock_szB;
   while ((cszB % VKI_PAGE_SIZE) > 0) cszB++;

   if (!called_before) {
      // First time we're called -- use the special static bootstrap
      // superblock (see comment at top of main() for details).
      called_before = True;
      vg_assert(a == arenaId_to_ArenaP(VG_AR_CORE));
      vg_assert(CORE_ARENA_MIN_SZB >= cszB);
      // Ensure sb is suitably aligned.
      sb = (Superblock*)align_upwards( bootstrap_superblock,
                                       VG_MIN_MALLOC_SZB );
   } else if (a->clientmem) {
      // client allocation -- return 0 to client if it fails
      sb = (Superblock *)
           VG_(client_alloc)(0, cszB,
                             VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC, 0);
      if (NULL == sb)
         return 0;
   } else {
      // non-client allocation -- aborts if it fails
      sb = VG_(get_memory_from_mmap) ( cszB, "newSuperblock" );
   }
   vg_assert(NULL != sb);
   vg_assert(0 == (Addr)sb % VG_MIN_MALLOC_SZB);
   sb->n_payload_bytes = cszB - sizeof(Superblock);
   a->bytes_mmaped += cszB;
   if (0)
      VG_(message)(Vg_DebugMsg, "newSuperblock, %d payload bytes",
                                sb->n_payload_bytes);
   return sb;
}

// Find the superblock containing the given chunk.
static
Superblock* findSb ( Arena* a, Block* b )
{
   Superblock* sb;
   for (sb = a->sblocks; sb; sb = sb->next)
      if ((Block*)&sb->payload_bytes[0] <= b
          && b < (Block*)&sb->payload_bytes[sb->n_payload_bytes])
         return sb;
   VG_(printf)("findSb: can't find pointer %p in arena `%s'\n", b, a->name );
   VG_(core_panic)("findSb: VG_(arena_free)() in wrong arena?");
   return NULL; /*NOTREACHED*/
}


/*------------------------------------------------------------*/
/*--- Command line options                                 ---*/
/*------------------------------------------------------------*/

/* Round malloc sizes up to a multiple of VG_SLOPPY_MALLOC_SZB bytes?
   default: NO
   Nb: the allocator always rounds blocks up to a multiple of
   VG_MIN_MALLOC_SZB.  VG_(clo_sloppy_malloc) is relevant eg. for
   Memcheck, which will be byte-precise with addressability maps on its
   malloc allocations unless --sloppy-malloc=yes. */
Bool VG_(clo_sloppy_malloc)  = False;

/* DEBUG: print malloc details?  default: NO */
Bool VG_(clo_trace_malloc)   = False;
| 520 | /* Minimum alignment in functions that don't specify alignment explicitly. |
nethercote | 2d5b816 | 2004-08-11 09:40:52 +0000 | [diff] [blame] | 521 | default: 0, i.e. use VG_MIN_MALLOC_SZB. */ |
| 522 | Int VG_(clo_alignment) = VG_MIN_MALLOC_SZB; |
fitzhardinge | 98abfc7 | 2003-12-16 02:05:15 +0000 | [diff] [blame] | 523 | |


Bool VG_(replacement_malloc_process_cmd_line_option)(Char* arg)
{
   if (VG_CLO_STREQN(12, arg, "--alignment=")) {
      VG_(clo_alignment) = (Int)VG_(atoll)(&arg[12]);

      if (VG_(clo_alignment) < VG_MIN_MALLOC_SZB
          || VG_(clo_alignment) > 4096
          || VG_(log2)( VG_(clo_alignment) ) == -1 /* not a power of 2 */) {
         VG_(message)(Vg_UserMsg, "");
         VG_(message)(Vg_UserMsg,
            "Invalid --alignment= setting.  "
            "Should be a power of 2, >= %d, <= 4096.", VG_MIN_MALLOC_SZB);
         VG_(bad_option)("--alignment");
      }
   }

   else VG_BOOL_CLO("--sloppy-malloc",  VG_(clo_sloppy_malloc))
   else VG_BOOL_CLO("--trace-malloc",   VG_(clo_trace_malloc))
   else
      return False;

   return True;
}

void VG_(replacement_malloc_print_usage)(void)
{
   VG_(printf)(
"    --sloppy-malloc=no|yes    round malloc sizes to multiple of %d? [no]\n"
"    --alignment=<number>      set minimum alignment of allocations [%d]\n",
      VG_SLOPPY_MALLOC_SZB, VG_MIN_MALLOC_SZB
   );
}

void VG_(replacement_malloc_print_debug_usage)(void)
{
   VG_(printf)(
"    --trace-malloc=no|yes     show client malloc details? [no]\n"
   );
}


/*------------------------------------------------------------*/
/*--- Functions for working with freelists.                ---*/
/*------------------------------------------------------------*/

// Nb: Determination of which freelist a block lives on is based on the
// payload size, not block size.

// Convert a payload size in bytes to a freelist number.
static
Int pszB_to_listNo ( Int pszB )
{
   vg_assert(pszB >= 0);
   vg_assert(0 == pszB % VG_MIN_MALLOC_SZB);
   pszB /= VG_MIN_MALLOC_SZB;
   if (pszB <= 2)   return 0;
   if (pszB <= 3)   return 1;
   if (pszB <= 4)   return 2;
   if (pszB <= 5)   return 3;
   if (pszB <= 6)   return 4;
   if (pszB <= 7)   return 5;
   if (pszB <= 8)   return 6;
   if (pszB <= 9)   return 7;
   if (pszB <= 10)  return 8;
   if (pszB <= 11)  return 9;
   if (pszB <= 12)  return 10;
   if (pszB <= 16)  return 11;
   if (pszB <= 32)  return 12;
   if (pszB <= 64)  return 13;
   if (pszB <= 128) return 14;
   return 15;
}
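
// For example, with VG_MIN_MALLOC_SZB == 8 (illustrative): a 16-byte
// payload scales to 2 and lands on list 0; a 96-byte payload scales to
// 12 and lands on list 10; anything over 1024 bytes falls through to
// list 15.
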

// What is the minimum payload size for a given list?
static
Int listNo_to_pszB_min ( Int listNo )
{
   Int pszB = 0;
   vg_assert(listNo >= 0 && listNo <= VG_N_MALLOC_LISTS);
   while (pszB_to_listNo(pszB) < listNo) pszB += VG_MIN_MALLOC_SZB;
   return pszB;
}

// What is the maximum payload size for a given list?
static
Int listNo_to_pszB_max ( Int listNo )
{
   vg_assert(listNo >= 0 && listNo <= VG_N_MALLOC_LISTS);
   if (listNo == VG_N_MALLOC_LISTS-1) {
      return MAX_PSZB;
   } else {
      return listNo_to_pszB_min(listNo+1) - 1;
   }
}


/* A nasty hack to try and reduce fragmentation.  Try and replace
   a->freelist[lno] with another block on the same list but with a
   lower address, with the idea of attempting to recycle the same
   blocks rather than cruise through the address space. */
static
void swizzle ( Arena* a, Int lno )
{
   Block* p_best;
   Block* pp;
   Block* pn;
   Int    i;

   p_best = a->freelist[lno];
   if (p_best == NULL) return;

   pn = pp = p_best;
   for (i = 0; i < 20; i++) {
      pn = get_next_b(pn);
      pp = get_prev_b(pp);
      if (pn < p_best) p_best = pn;
      if (pp < p_best) p_best = pp;
   }
   if (p_best < a->freelist[lno]) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)("retreat by %d\n", a->freelist[lno] - p_best);
#     endif
      a->freelist[lno] = p_best;
   }
}


/*------------------------------------------------------------*/
/*--- Sanity-check/debugging machinery.                    ---*/
/*------------------------------------------------------------*/

#define VG_REDZONE_LO_MASK 0x31
#define VG_REDZONE_HI_MASK 0x7c

// Do some crude sanity checks on a chunk.
static
Bool blockSane ( Arena* a, Block* b )
{
#  define BLEAT(str) VG_(printf)("blockSane: fail -- %s\n",str)
   Int i;
   if (get_bszB_lo(b) != get_bszB_hi(b))
      {BLEAT("sizes");return False;}
   if (!a->clientmem && is_inuse_bszB(get_bszB_lo(b))) {
      for (i = 0; i < a->rz_szB; i++) {
         if (get_rz_lo_byte(a, b, i) !=
             (UByte)(((Addr)b&0xff) ^ VG_REDZONE_LO_MASK))
            {BLEAT("redzone-lo");return False;}
         if (get_rz_hi_byte(a, b, i) !=
             (UByte)(((Addr)b&0xff) ^ VG_REDZONE_HI_MASK))
            {BLEAT("redzone-hi");return False;}
      }
   }
   return True;
#  undef BLEAT
}

// Print superblocks (only for debugging).
static
void ppSuperblocks ( Arena* a )
{
   Int i, b_bszB, blockno;
   Block* b;
   Superblock* sb = a->sblocks;
   blockno = 1;

   while (sb) {
      VG_(printf)( "\n" );
      VG_(printf)( "superblock %d at %p, sb->n_pl_bs = %d, next = %p\n",
                   blockno++, sb, sb->n_payload_bytes, sb->next );
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         b      = (Block*)&sb->payload_bytes[i];
         b_bszB = get_bszB_lo(b);
         VG_(printf)( "   block at %d, bszB %d: ", i, mk_plain_bszB(b_bszB) );
         VG_(printf)( "%s, ", is_inuse_bszB(b_bszB) ? "inuse" : "free");
         VG_(printf)( "%s\n", blockSane(a, b) ? "ok" : "BAD" );
      }
      vg_assert(i == sb->n_payload_bytes);   // no overshoot at end of Sb
      sb = sb->next;
   }
   VG_(printf)( "end of superblocks\n\n" );
}

// Sanity check both the superblocks and the chains.
static void sanity_check_malloc_arena ( ArenaId aid )
{
   Int         i, superblockctr, b_bszB, b_pszB, blockctr_sb, blockctr_li;
   Int         blockctr_sb_free, listno, list_min_pszB, list_max_pszB;
   Superblock* sb;
   Bool        thisFree, lastWasFree;
   Block*      b;
   Block*      b_prev;
   UInt        arena_bytes_on_loan;
   Arena*      a;

#  define BOMB VG_(core_panic)("sanity_check_malloc_arena")

   a = arenaId_to_ArenaP(aid);

   // First, traverse all the superblocks, inspecting the Blocks in each.
   superblockctr = blockctr_sb = blockctr_sb_free = 0;
   arena_bytes_on_loan = 0;
   sb = a->sblocks;
   while (sb) {
      lastWasFree = False;
      superblockctr++;
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         blockctr_sb++;
         b      = (Block*)&sb->payload_bytes[i];
         b_bszB = get_bszB_lo(b);
         if (!blockSane(a, b)) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d "
                        "(bszB %d):  BAD\n", sb, i, b_bszB );
            BOMB;
         }
         thisFree = !is_inuse_bszB(b_bszB);
         if (thisFree && lastWasFree) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d "
                        "(bszB %d): UNMERGED FREES\n", sb, i, b_bszB );
            BOMB;
         }
         if (thisFree) blockctr_sb_free++;
         if (!thisFree)
            arena_bytes_on_loan += bszB_to_pszB(a, b_bszB);
         lastWasFree = thisFree;
      }
      if (i > sb->n_payload_bytes) {
         VG_(printf)( "sanity_check_malloc_arena: sb %p: last block "
                      "overshoots end\n", sb);
         BOMB;
      }
      sb = sb->next;
   }

   if (arena_bytes_on_loan != a->bytes_on_loan) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: a->bytes_on_loan %d, "
                   "arena_bytes_on_loan %d: "
                   "MISMATCH\n", a->bytes_on_loan, arena_bytes_on_loan);
#     endif
      ppSuperblocks(a);
      BOMB;
   }

   /* Second, traverse each list, checking that the back pointers make
      sense, counting blocks encountered, and checking that each block
      is an appropriate size for this list. */
   blockctr_li = 0;
   for (listno = 0; listno < VG_N_MALLOC_LISTS; listno++) {
      list_min_pszB = listNo_to_pszB_min(listno);
      list_max_pszB = listNo_to_pszB_max(listno);
      b = a->freelist[listno];
      if (b == NULL) continue;
      while (True) {
         b_prev = b;
         b = get_next_b(b);
         if (get_prev_b(b) != b_prev) {
            VG_(printf)( "sanity_check_malloc_arena: list %d at %p: "
                         "BAD LINKAGE\n",
                         listno, b );
            BOMB;
         }
         b_pszB = bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(b)));
         if (b_pszB < list_min_pszB || b_pszB > list_max_pszB) {
            VG_(printf)(
               "sanity_check_malloc_arena: list %d at %p: "
               "WRONG CHAIN SIZE %dB (%dB, %dB)\n",
               listno, b, b_pszB, list_min_pszB, list_max_pszB );
            BOMB;
         }
         blockctr_li++;
         if (b == a->freelist[listno]) break;
      }
   }

   if (blockctr_sb_free != blockctr_li) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: BLOCK COUNT MISMATCH "
                   "(via sbs %d, via lists %d)\n",
                   blockctr_sb_free, blockctr_li );
#     endif
      ppSuperblocks(a);
      BOMB;
   }

   if (VG_(clo_verbosity) > 2)
      VG_(message)(Vg_DebugMsg,
                   "AR %8s: %2d sbs, %5d bs, %2d/%-2d free bs, "
                   "%7d mmap, %7d loan",
                   a->name,
                   superblockctr,
                   blockctr_sb, blockctr_sb_free, blockctr_li,
                   a->bytes_mmaped, a->bytes_on_loan);
#  undef BOMB
}


void VG_(sanity_check_malloc_all) ( void )
{
   Int i;
   for (i = 0; i < VG_N_ARENAS; i++)
      sanity_check_malloc_arena ( i );
}

/* Really, this isn't the right place for this.  Nevertheless: find
   out if an arena is empty -- currently has no bytes on loan.  This
   is useful for checking for memory leaks (of Valgrind, not the
   client). */
Bool VG_(is_empty_arena) ( ArenaId aid )
{
   Arena*      a;
   Superblock* sb;
   Block*      b;
   Int         b_bszB;

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);
   for (sb = a->sblocks; sb != NULL; sb = sb->next) {
      // If the superblock is empty, it should contain a single free
      // block, of the right size.
      b = (Block*)&sb->payload_bytes[0];
      b_bszB = get_bszB_lo(b);
      if (is_inuse_bszB(b_bszB)) return False;
      if (mk_plain_bszB(b_bszB) != sb->n_payload_bytes) return False;
      // If we reach here, this block is not in use and is of the right
      // size, so keep going around the loop...
   }
   return True;
}


/*------------------------------------------------------------*/
/*--- Creating and deleting blocks.                        ---*/
/*------------------------------------------------------------*/

// Mark the bytes at b .. b+bszB-1 as not in use, and add them to the
// relevant free list.

static
void mkFreeBlock ( Arena* a, Block* b, Int bszB, Int b_lno )
{
   Int pszB = bszB_to_pszB(a, bszB);
   vg_assert(pszB >= 0);
   vg_assert(b_lno == pszB_to_listNo(pszB));
   // Set the size fields and indicate not-in-use.
   set_bszB_lo(b, mk_free_bszB(bszB));
   set_bszB_hi(b, mk_free_bszB(bszB));

   // Add to the relevant list.
   if (a->freelist[b_lno] == NULL) {
      set_prev_b(b, b);
      set_next_b(b, b);
      a->freelist[b_lno] = b;
   } else {
      Block* b_prev = get_prev_b(a->freelist[b_lno]);
      Block* b_next = a->freelist[b_lno];
      set_next_b(b_prev, b);
      set_prev_b(b_next, b);
      set_next_b(b, b_next);
      set_prev_b(b, b_prev);
   }
#  ifdef DEBUG_MALLOC
   (void)blockSane(a,b);
#  endif
}
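
// The splice above inserts b just before the list head: a list that
// previously read (head <-> x <-> y <-> head, circularly) afterwards
// reads (head <-> x <-> y <-> b <-> head).  This is just a sketch of
// the circular doubly-linked discipline, not an extra invariant.
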

// Mark the bytes at b .. b+bszB-1 as in use, and set up the block
// appropriately.
static
void mkInuseBlock ( Arena* a, Block* b, UInt bszB )
{
   Int i;
   vg_assert(bszB >= min_useful_bszB(a));
   set_bszB_lo(b, mk_inuse_bszB(bszB));
   set_bszB_hi(b, mk_inuse_bszB(bszB));
   set_prev_b(b, NULL);    // Take off freelist
   set_next_b(b, NULL);    // ditto
   if (!a->clientmem) {
      for (i = 0; i < a->rz_szB; i++) {
         set_rz_lo_byte(a, b, i, (UByte)(((Addr)b&0xff) ^ VG_REDZONE_LO_MASK));
         set_rz_hi_byte(a, b, i, (UByte)(((Addr)b&0xff) ^ VG_REDZONE_HI_MASK));
      }
   }
#  ifdef DEBUG_MALLOC
   (void)blockSane(a,b);
#  endif
}

// Remove a block from a given list.  Does no sanity checking.
static
void unlinkBlock ( Arena* a, Block* b, Int listno )
{
   vg_assert(listno >= 0 && listno < VG_N_MALLOC_LISTS);
   if (get_prev_b(b) == b) {
      // Only one element in the list; treat it specially.
      vg_assert(get_next_b(b) == b);
      a->freelist[listno] = NULL;
   } else {
      Block* b_prev = get_prev_b(b);
      Block* b_next = get_next_b(b);
      a->freelist[listno] = b_prev;
      set_next_b(b_prev, b_next);
      set_prev_b(b_next, b_prev);
      swizzle ( a, listno );
   }
   set_prev_b(b, NULL);
   set_next_b(b, NULL);
}


/*------------------------------------------------------------*/
/*--- Core-visible functions.                              ---*/
/*------------------------------------------------------------*/

// Align the request size.
static __inline__
Int align_req_pszB ( Int req_pszB )
{
   Int n = VG_MIN_MALLOC_SZB-1;
   return ((req_pszB + n) & (~n));
}
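
// E.g. with VG_MIN_MALLOC_SZB == 8, so n == 7 (illustrative):
//    align_req_pszB(13) == (13 + 7) & ~7 == 16
//    align_req_pszB(16) == (16 + 7) & ~7 == 16   (already aligned)
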

void* VG_(arena_malloc) ( ArenaId aid, Int req_pszB )
{
   Int         req_bszB, frag_bszB, b_bszB, lno;
   Superblock* new_sb;
   Block*      b = NULL;
   Arena*      a;
   void*       v;

   VGP_PUSHCC(VgpMalloc);

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);

   vg_assert(0 <= req_pszB && req_pszB < MAX_PSZB);
   req_pszB = align_req_pszB(req_pszB);
   req_bszB = pszB_to_bszB(a, req_pszB);

   // Scan through all the big-enough freelists for a block.
   for (lno = pszB_to_listNo(req_pszB); lno < VG_N_MALLOC_LISTS; lno++) {
      b = a->freelist[lno];
      if (NULL == b) continue;   // If this list is empty, try the next one.
      while (True) {
         b_bszB = mk_plain_bszB(get_bszB_lo(b));
         if (b_bszB >= req_bszB) goto obtained_block;    // success!
         b = get_next_b(b);
         if (b == a->freelist[lno]) break;   // traversed entire freelist
      }
   }

   // If we reach here, no suitable block was found; allocate a new
   // superblock.
   vg_assert(lno == VG_N_MALLOC_LISTS);
   new_sb = newSuperblock(a, req_bszB);
   if (NULL == new_sb) {
      // Should only fail if the allocation was for the client; otherwise,
      // we should have aborted already.
      vg_assert(VG_AR_CLIENT == aid);
      return NULL;
   }
   new_sb->next = a->sblocks;
   a->sblocks = new_sb;
   b = (Block*)&new_sb->payload_bytes[0];
   lno = pszB_to_listNo(bszB_to_pszB(a, new_sb->n_payload_bytes));
   mkFreeBlock ( a, b, new_sb->n_payload_bytes, lno);
   // fall through

  obtained_block:
   // Ok, we can allocate from b, which lives in list lno.
   vg_assert(b != NULL);
   vg_assert(lno >= 0 && lno < VG_N_MALLOC_LISTS);
   vg_assert(a->freelist[lno] != NULL);
   b_bszB = mk_plain_bszB(get_bszB_lo(b));
   // req_bszB is the size of the block we are after.  b_bszB is the
   // size of what we've actually got.
   vg_assert(b_bszB >= req_bszB);

   // Could we split this block and still get a useful fragment?
   frag_bszB = b_bszB - req_bszB;
   if (frag_bszB >= min_useful_bszB(a)) {
      // Yes, split block in two, put the fragment on the appropriate free
      // list, and update b_bszB accordingly.
      // printf( "split %dB into %dB and %dB\n", b_bszB, req_bszB, frag_bszB );
      unlinkBlock(a, b, lno);
      mkInuseBlock(a, b, req_bszB);
      mkFreeBlock(a, &b[req_bszB], frag_bszB,
                     pszB_to_listNo(bszB_to_pszB(a, frag_bszB)));
      b_bszB = mk_plain_bszB(get_bszB_lo(b));
   } else {
      // No, mark as in use and use as-is.
      unlinkBlock(a, b, lno);
      mkInuseBlock(a, b, b_bszB);
   }

   // Update stats
   a->bytes_on_loan += bszB_to_pszB(a, b_bszB);
   if (a->bytes_on_loan > a->bytes_on_loan_max)
      a->bytes_on_loan_max = a->bytes_on_loan;

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   VGP_POPCC(VgpMalloc);
   v = get_block_payload(a, b);
   vg_assert( (((Addr)v) & (VG_MIN_MALLOC_SZB-1)) == 0 );
   return v;
}


void VG_(arena_free) ( ArenaId aid, void* ptr )
{
   Superblock* sb;
   UByte*      sb_start;
   UByte*      sb_end;
   Block*      other;
   Block*      b;
   Int         b_bszB, b_pszB, other_bszB, b_listno;
   Arena*      a;

   VGP_PUSHCC(VgpMalloc);

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);

   if (ptr == NULL) {
      VGP_POPCC(VgpMalloc);
      return;
   }

   b = get_payload_block(a, ptr);

#  ifdef DEBUG_MALLOC
   vg_assert(blockSane(a, b));
#  endif

   a->bytes_on_loan -= bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(b)));

   sb       = findSb( a, b );
   sb_start = &sb->payload_bytes[0];
   sb_end   = &sb->payload_bytes[sb->n_payload_bytes - 1];

   // Put this chunk back on a list somewhere.
   b_bszB   = get_bszB_lo(b);
   b_pszB   = bszB_to_pszB(a, b_bszB);
   b_listno = pszB_to_listNo(b_pszB);
   mkFreeBlock( a, b, b_bszB, b_listno );

   // See if this block can be merged with its successor.
   // First test if we're far enough before the superblock's end to possibly
   // have a successor.
   other = b + b_bszB;
   if (other+min_useful_bszB(a)-1 <= (Block*)sb_end) {
      // Ok, we have a successor, merge if it's not in use.
      other_bszB = get_bszB_lo(other);
      if (!is_inuse_bszB(other_bszB)) {
         // VG_(printf)( "merge-successor\n");
         other_bszB = mk_plain_bszB(other_bszB);
#        ifdef DEBUG_MALLOC
         vg_assert(blockSane(a, other));
#        endif
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other, pszB_to_listNo(bszB_to_pszB(a,other_bszB)) );
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
      }
   } else {
      // Not enough space for successor: check that b is the last block
      // ie. there are no unused bytes at the end of the Superblock.
      vg_assert(other-1 == (Block*)sb_end);
   }

   // Then see if this block can be merged with its predecessor.
   // First test if we're far enough after the superblock's start to possibly
   // have a predecessor.
   if (b >= (Block*)sb_start + min_useful_bszB(a)) {
      // Ok, we have a predecessor, merge if it's not in use.
      other = get_predecessor_block( b );
      other_bszB = get_bszB_lo(other);
      if (!is_inuse_bszB(other_bszB)) {
         // VG_(printf)( "merge-predecessor\n");
         other_bszB = mk_plain_bszB(other_bszB);
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other, pszB_to_listNo(bszB_to_pszB(a, other_bszB)) );
         b = other;
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
      }
   } else {
      // Not enough space for predecessor: check that b is the first block,
      // ie. there are no unused bytes at the start of the Superblock.
      vg_assert((Block*)sb_start == b);
   }

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   VGP_POPCC(VgpMalloc);
}


/*
   The idea for malloc_aligned() is to allocate a big block, base, and
   then split it into two parts: frag, which is returned back to the
   free pool, and align, which is the bit we're really after.  Here's
   a picture.  L and H denote the block lower and upper overheads, in
   bytes.  The details are gruesome.  Note it is slightly complicated
   because the initial request to generate base may return a bigger
   block than we asked for, so it is important to distinguish the base
   request size and the base actual size.

   frag_b                   align_b
   |                        |
   |    frag_p              |    align_p
   |    |                   |    |
   v    v                   v    v

   +---+                +---+---+               +---+
   | L |----------------| H | L |---------------| H |
   +---+                +---+---+               +---+

   ^    ^                        ^
   |    |                        :
   |    base_p                   this addr must be aligned
   |
   base_b

   .    .                   .    .              .    .
   <------- frag_bszB ------>    .              .    .
   .    <------------ base_pszB_act ----------->    .
   .    .                   .    .              .    .

*/
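/* A worked sizing example with made-up numbers: if min_useful_bszB(a)
   is 32 and the caller wants req_pszB == 100 bytes aligned to
   req_alignB == 64, then base_pszB_req == 100 + 32 + 64 == 196.  That
   over-allocation guarantees the base block contains a 64-aligned
   payload address far enough in that the leading fragment is at least
   32 bytes, i.e. big enough to be a genuine free-list block. */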
void* VG_(arena_malloc_aligned) ( ArenaId aid, Int req_alignB, Int req_pszB )
{
   Int    base_pszB_req, base_pszB_act, frag_bszB;
   Block  *base_b, *align_b;
   UByte  *base_p, *align_p;
   UInt   saved_bytes_on_loan;
   Arena* a;

   VGP_PUSHCC(VgpMalloc);

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);

   vg_assert(0 <= req_pszB && req_pszB < MAX_PSZB);

   // Check that the requested alignment is reasonable, i.e. a power of 2
   // within sane bounds.
   if (req_alignB < VG_MIN_MALLOC_SZB
       || req_alignB > 1048576
       || VG_(log2)( req_alignB ) == -1 /* not a power of 2 */) {
      VG_(printf)("VG_(arena_malloc_aligned)(%p, %d, %d)\nbad alignment\n",
                  a, req_alignB, req_pszB );
      VG_(core_panic)("VG_(arena_malloc_aligned)");
      /*NOTREACHED*/
   }
   // Paranoid
   vg_assert(req_alignB % VG_MIN_MALLOC_SZB == 0);

   /* Required payload size for the aligned chunk. */
   req_pszB = align_req_pszB(req_pszB);

   /* Payload size to request for the big block that we will split up. */
   base_pszB_req = req_pszB + min_useful_bszB(a) + req_alignB;

   /* Payload ptr for the block we are going to split.  Note this
      changes a->bytes_on_loan; we save and restore it ourselves. */
   saved_bytes_on_loan = a->bytes_on_loan;
   base_p = VG_(arena_malloc) ( aid, base_pszB_req );
   a->bytes_on_loan = saved_bytes_on_loan;

   /* Block ptr for the block we are going to split. */
   base_b = get_payload_block ( a, base_p );

   /* Pointer to the payload of the aligned block we are going to
      return.  This has to be suitably aligned. */
   align_p = align_upwards ( base_b + 2 * overhead_szB_lo(a)
                                    + overhead_szB_hi(a),
                             req_alignB );
   align_b = get_payload_block(a, align_p);

   /* The block size of the fragment we will create.  This must be big
      enough to actually create a fragment. */
   frag_bszB = align_b - base_b;

   vg_assert(frag_bszB >= min_useful_bszB(a));

   /* The actual payload size of the block we are going to split. */
   base_pszB_act = bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(base_b)));

   /* Create the fragment block, and put it back on the relevant free list. */
   mkFreeBlock ( a, base_b, frag_bszB,
                 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)) );

   /* Create the aligned block. */
   mkInuseBlock ( a, align_b,
                  base_p + base_pszB_act
                         + overhead_szB_hi(a) - (UByte*)align_b );

   /* Final sanity checks. */
   vg_assert( is_inuse_bszB(get_bszB_lo(get_payload_block(a, align_p))) );

   vg_assert(req_pszB
             <=
             bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(
                                get_payload_block(a, align_p))))
   );

   a->bytes_on_loan
      += bszB_to_pszB(a, mk_plain_bszB(get_bszB_lo(
                             get_payload_block(a, align_p))));
   if (a->bytes_on_loan > a->bytes_on_loan_max)
      a->bytes_on_loan_max = a->bytes_on_loan;

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   VGP_POPCC(VgpMalloc);

   vg_assert( (((Addr)align_p) % req_alignB) == 0 );
   return align_p;
}
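
/* Hypothetical usage sketch (arena choice and sizes are illustrative,
   not taken from real callers): a core subsystem wanting a 4096-aligned
   scratch buffer could write

      UByte* buf = VG_(arena_malloc_aligned)( VG_AR_CORE, 4096, 1000 );
      ...
      VG_(arena_free)( VG_AR_CORE, buf );

   The returned pointer is 4096-aligned and its payload is at least
   1000 bytes (after rounding up by align_req_pszB). */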


Int VG_(arena_payload_szB) ( ArenaId aid, void* ptr )
{
   Arena* a = arenaId_to_ArenaP(aid);
   Block* b = get_payload_block(a, ptr);
   return bszB_to_pszB(a, get_bszB_lo(b));
}


/*------------------------------------------------------------*/
/*--- Services layered on top of malloc/free.              ---*/
/*------------------------------------------------------------*/

void* VG_(arena_calloc) ( ArenaId aid, Int alignB, Int nmemb, Int nbytes )
{
   Int    i, size;
   UChar* p;

   VGP_PUSHCC(VgpMalloc);

   // Note: this relies on the product not overflowing; the assertion
   // catches (most) negative wraparounds.
   size = nmemb * nbytes;
   vg_assert(size >= 0);

   if (alignB == VG_MIN_MALLOC_SZB)
      p = VG_(arena_malloc) ( aid, size );
   else
      p = VG_(arena_malloc_aligned) ( aid, alignB, size );

   for (i = 0; i < size; i++) p[i] = 0;

   VGP_POPCC(VgpMalloc);

   return p;
}
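
/* E.g. (illustrative call, not from this file):
      UChar* z = VG_(arena_calloc)( VG_AR_CORE, VG_MIN_MALLOC_SZB, 10, 12 );
   returns a block of at least 10 * 12 == 120 bytes, all zeroed. */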


void* VG_(arena_realloc) ( ArenaId aid, void* ptr,
                           Int req_alignB, Int req_pszB )
{
   Arena* a;
   Int    old_bszB, old_pszB, i;
   UChar  *p_old, *p_new;
   Block* b;

   VGP_PUSHCC(VgpMalloc);

   ensure_mm_init();
   a = arenaId_to_ArenaP(aid);

   vg_assert(0 <= req_pszB && req_pszB < MAX_PSZB);

   b = get_payload_block(a, ptr);
   vg_assert(blockSane(a, b));

   old_bszB = get_bszB_lo(b);
   vg_assert(is_inuse_bszB(old_bszB));
   old_bszB = mk_plain_bszB(old_bszB);
   old_pszB = bszB_to_pszB(a, old_bszB);

   /* A non-growing request is served by the existing block. */
   if (req_pszB <= old_pszB) {
      VGP_POPCC(VgpMalloc);
      return ptr;
   }

   if (req_alignB == VG_MIN_MALLOC_SZB)
      p_new = VG_(arena_malloc) ( aid, req_pszB );
   else
      p_new = VG_(arena_malloc_aligned) ( aid, req_alignB, req_pszB );

   p_old = (UChar*)ptr;
   for (i = 0; i < old_pszB; i++)
      p_new[i] = p_old[i];

   VG_(arena_free)(aid, p_old);

   VGP_POPCC(VgpMalloc);
   return p_new;
}
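
/* Hypothetical growth pattern (illustrative only):

      UChar* v = VG_(arena_malloc)( VG_AR_CORE, 10 );
      v = VG_(arena_realloc)( VG_AR_CORE, v, VG_MIN_MALLOC_SZB, 20 );

   Non-growing requests return the original pointer unchanged, so
   callers must not assume realloc always moves the block. */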


/*------------------------------------------------------------*/
/*--- Tool-visible functions.                              ---*/
/*------------------------------------------------------------*/

// All just wrappers to avoid exposing arenas to tools.

void* VG_(malloc) ( Int nbytes )
{
   return VG_(arena_malloc) ( VG_AR_TOOL, nbytes );
}

void VG_(free) ( void* ptr )
{
   VG_(arena_free) ( VG_AR_TOOL, ptr );
}

void* VG_(calloc) ( Int nmemb, Int nbytes )
{
   return VG_(arena_calloc) ( VG_AR_TOOL, VG_MIN_MALLOC_SZB, nmemb, nbytes );
}

void* VG_(realloc) ( void* ptr, Int size )
{
   return VG_(arena_realloc) ( VG_AR_TOOL, ptr, VG_MIN_MALLOC_SZB, size );
}

void* VG_(malloc_aligned) ( Int req_alignB, Int req_pszB )
{
   return VG_(arena_malloc_aligned) ( VG_AR_TOOL, req_alignB, req_pszB );
}
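
/* Hypothetical tool-side usage (illustrative only); tools allocate
   from VG_AR_TOOL without ever naming an arena:

      Int* counters = VG_(malloc)( 100 * sizeof(Int) );
      counters = VG_(realloc)( counters, 200 * sizeof(Int) );
      VG_(free)( counters );
*/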


void* VG_(cli_malloc) ( UInt align, Int nbytes )
{
   // 'align' should be valid by now.  VG_(arena_malloc_aligned)() will
   // abort if it's not.
   if (VG_MIN_MALLOC_SZB == align)
      return VG_(arena_malloc) ( VG_AR_CLIENT, nbytes );
   else
      return VG_(arena_malloc_aligned) ( VG_AR_CLIENT, align, nbytes );
}

void VG_(cli_free) ( void* p )
{
   VG_(arena_free) ( VG_AR_CLIENT, p );
}


Bool VG_(addr_is_in_block)( Addr a, Addr start, UInt size )
{
   return (start - VG_(vg_malloc_redzone_szB) <= a
           && a < start + size + VG_(vg_malloc_redzone_szB));
}
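
/* Worked example with a made-up redzone size: if
   VG_(vg_malloc_redzone_szB) is 8, a block with start == 0x1000 and
   size == 16 "contains" exactly the addresses in [0xFF8, 0x1018):
   the payload plus 8 redzone bytes on either side. */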


/*------------------------------------------------------------*/
/*--- The original test driver machinery.                  ---*/
/*------------------------------------------------------------*/

#if 0

#if 1
#define N_TEST_TRANSACTIONS 100000000
#define N_TEST_ARR 200000
#define M_TEST_MALLOC 1000
#else
#define N_TEST_TRANSACTIONS 500000
#define N_TEST_ARR 30000
#define M_TEST_MALLOC 500
#endif


void* test_arr[N_TEST_ARR];

int main ( int argc, char** argv )
{
   Int i, j, k, nbytes, qq;
   unsigned char* chp;
   Arena* a = &arena[VG_AR_CORE];
   srandom(1);
   for (i = 0; i < N_TEST_ARR; i++)
      test_arr[i] = NULL;

   for (i = 0; i < N_TEST_TRANSACTIONS; i++) {
      if (i % 50000 == 0) mallocSanityCheck(a);
      j = random() % N_TEST_ARR;
      if (test_arr[j]) {
         vg_free(a, test_arr[j]);
         test_arr[j] = NULL;
      } else {
         nbytes = 1 + random() % M_TEST_MALLOC;
         qq = random()%64;
         if (qq == 32)
            nbytes *= 17;
         else if (qq == 33)
            nbytes = 0;
         test_arr[j]
            = (i % 17) == 0
                 ? vg_memalign(a, nbytes, 1<< (3+(random()%10)))
                 : vg_malloc( a, nbytes );
         chp = test_arr[j];
         for (k = 0; k < nbytes; k++)
            chp[k] = (unsigned char)(k + 99);
      }
   }


   for (i = 0; i < N_TEST_ARR; i++) {
      if (test_arr[i]) {
         vg_free(a, test_arr[i]);
         test_arr[i] = NULL;
      }
   }
   mallocSanityCheck(a);

   fprintf(stderr, "ALL DONE\n");

   show_arena_stats(a);
   fprintf(stderr, "%d max useful, %d bytes mmap'd (%4.1f%%), %d useful\n",
           a->bytes_on_loan_max,
           a->bytes_mmaped,
           100.0 * (double)a->bytes_on_loan_max / (double)a->bytes_mmaped,
           a->bytes_on_loan );

   return 0;
}
#endif /* 0 */


/*--------------------------------------------------------------------*/
/*--- end                                             vg_malloc2.c ---*/
/*--------------------------------------------------------------------*/