J. Duke | 319a3b9 | 2007-12-01 00:00:00 +0000 | [diff] [blame^] | 1 | /* |
| 2 | * Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved. |
| 3 | * |
| 4 | * Redistribution and use in source and binary forms, with or without |
| 5 | * modification, are permitted provided that the following conditions |
| 6 | * are met: |
| 7 | * |
| 8 | * - Redistributions of source code must retain the above copyright |
| 9 | * notice, this list of conditions and the following disclaimer. |
| 10 | * |
| 11 | * - Redistributions in binary form must reproduce the above copyright |
| 12 | * notice, this list of conditions and the following disclaimer in the |
| 13 | * documentation and/or other materials provided with the distribution. |
| 14 | * |
| 15 | * - Neither the name of Sun Microsystems nor the names of its |
| 16 | * contributors may be used to endorse or promote products derived |
| 17 | * from this software without specific prior written permission. |
| 18 | * |
| 19 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS |
| 20 | * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, |
| 21 | * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
| 22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR |
| 23 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
| 24 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
| 25 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
| 26 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF |
| 27 | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING |
| 28 | * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
| 29 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 30 | */ |
| 31 | |
| 32 | /* Lookup Table of generic elements. */ |
| 33 | |
| 34 | /* |
| 35 | * Each table has a unique lock, all accesses are protected. |
| 36 | * |
| 37 | * Table elements are identified with a 32bit unsigned int. |
| 38 | * (Also see HARE trick below, which makes the TableIndex unique per table). |
| 39 | * |
| 40 | * Each element has a key (N bytes) and possible additional info. |
| 41 | * |
| 42 | * Two elements with the same key should be the same element. |
| 43 | * |
| 44 | * The storage for the Key and Info cannot move, the table itself can. |
| 45 | * |
| 46 | * The hash table will only be allocated if we have keys, and will resize |
| 47 | * when the table needs to resize. The hash buckets just provide the |
| 48 | * reference to the first TableIndex in the hash bucket, the next |
| 49 | * field of the TableElement takes you to the next item in the hash |
| 50 | * bucket. Lookups will drift the looked up item to the head of the |
| 51 | * list. |
| 52 | * |
| 53 | * The full 32bit hashcode and key length is saved for comparisons, the |
| 54 | * last thing done is the actual comparison of the Key contents with |
| 55 | * keys_equal(). |
| 56 | * |
| 57 | * Freed elements (not many tables actually free items) are managed with |
| 58 | * a bit vector and a low index where a freed element might be found. |
| 59 | * Bytes are inspected until a non-zero byte indicates a freed bit is |
| 60 | * set. A count of freed elements is also kept. |
| 61 | * |
| 62 | */ |
| 63 | |
| 64 | #include "hprof.h" |
| 65 | |
/* Macros for bit vectors: unsigned char 2^3==8 OR unsigned int 2^5==32 */

#define BV_CHUNK_POWER_2 3 /* 2 to this power == BV_CHUNK_BITSIZE */
#define BV_CHUNK_TYPE unsigned char

#define BV_CHUNK_BITSIZE (((int)sizeof(BV_CHUNK_TYPE))<<3) /* x8 */
/* Mask selecting the bit position of an index within its chunk. */
#define BV_CHUNK_INDEX_MASK ( (1 << BV_CHUNK_POWER_2) - 1 )
/* Number of chunks needed to hold 'nelems' bits (rounded up with slack). */
#define BV_ELEMENT_COUNT(nelems) ((((nelems+1)) >> BV_CHUNK_POWER_2) + 1)

/* Round a bit index down to the first bit of its containing chunk. */
#define BV_CHUNK_ROUND(i) ((i) & ~(BV_CHUNK_INDEX_MASK))
/* The chunk (an lvalue) holding bit 'i' of the vector starting at 'ptr'. */
#define BV_CHUNK(ptr, i) \
        (((BV_CHUNK_TYPE*)(ptr))[(i) >> BV_CHUNK_POWER_2])
/* Single-bit mask for bit 'i' within its chunk. */
#define BV_CHUNK_MASK(i) \
        (1 << ((i) & BV_CHUNK_INDEX_MASK))
| 80 | |
/* Hash code value (full 32-bit hash, not a bucket index). */

typedef unsigned HashCode;

/* Basic key for an element. What makes the element unique. */

typedef struct TableKey {
    void *ptr; /* Pointer to arbitrary data that forms the key. */
    int len; /* Length in bytes of this key. */
} TableKey;

/* Basic TableElement (but only allocated if keys are used).
 * NOTE(review): TableIndex is declared in hprof.h — appears to be an
 * unsigned 32-bit integer given the HARE masking below; confirm there.
 */

typedef struct TableElement {
    TableKey key; /* The element key. */
    HashCode hcode; /* The full 32bit hashcode for the key. */
    TableIndex next; /* The next TableElement in the hash bucket chain. */
    void *info; /* Info pointer */
} TableElement;
| 100 | |
/* Generic Lookup Table structure.
 * The element array ('table') may move on resize; key and info storage
 * live in Blocks ('key_blocks'/'info_blocks') and never move, so
 * pointers handed out via get_key()/get_info() stay valid.
 */

typedef struct LookupTable {
    char name[48]; /* Name of table. */
    void *table; /* Pointer to array of elements. */
    TableIndex *hash_buckets; /* Pointer to hash bucket chains. */
    Blocks *info_blocks; /* Blocks space for info */
    Blocks *key_blocks; /* Blocks space for keys */
    TableIndex next_index; /* Next element available (index 0 unused). */
    TableIndex table_size; /* Current size of table. */
    TableIndex table_incr; /* Suggested increment size. */
    TableIndex hash_bucket_count; /* Number of hash buckets (0 = unhashed). */
    int elem_size; /* Size of element. */
    int info_size; /* Size of info structure. */
    void *freed_bv; /* Freed element bit vector (lazily allocated). */
    int freed_count; /* Count of freed'd elements */
    TableIndex freed_start; /* First freed in table */
    int resizes; /* Count of table resizes done. */
    unsigned bucket_walks; /* Count of bucket walks. */
    jrawMonitorID lock; /* Lock for table access. */
    SerialNumber serial_num; /* Table serial number. */
    TableIndex hare; /* Rabbit (HARE) trick. */
} LookupTable;

/* To get a pointer to an element, regardless of element size. */

#define ELEMENT_PTR(ltable, i) \
        ((void*)(((char*)(ltable)->table) + (ltable)->elem_size * (i)))
| 129 | |
/* Sanity, check all the time. */

#define SANITY_CHECK(condition) ( (condition) ? (void)0 : \
                HPROF_ERROR(JNI_FALSE, "SANITY IN QUESTION: " #condition))

/* To see if an index is valid. */

#define SANITY_CHECK_INDEX(ltable,i) SANITY_CHECK((i) < ltable->next_index)

/* Small rabbits (hares) can be hidden in the index value returned.
 * Only the right rabbits are allowed in certain pens (LookupTables).
 * When herding rabbits it's important to keep them separate,
 * there are lots of rabbits, all different kinds and sizes,
 * keeping them all separate is important to avoid cross breeding.
 */

/* The HARE lives in the top 4 bits of a TableIndex: each table tags
 * the indexes it hands out with its own serial number (see
 * table_initialize), so an index from table A used on table B trips
 * SANITY_CHECK_HARE. Undefine _SANITY_USE_HARE to turn this off.
 */
#define _SANITY_USE_HARE
#ifdef _SANITY_USE_HARE
    #define SANITY_ADD_HARE(i,hare) (SANITY_REMOVE_HARE(i) | (hare))
    #define SANITY_REMOVE_HARE(i) ((i) & 0x0FFFFFFF)
    #define SANITY_CHECK_HARE(i,hare) SANITY_CHECK(SANITY_ADD_HARE(i,hare)==(i))
#else
    #define SANITY_ADD_HARE(i,hare) (i)
    #define SANITY_REMOVE_HARE(i) (i)
    #define SANITY_CHECK_HARE(i,hare)
#endif
| 156 | |
| 157 | static jrawMonitorID |
| 158 | lock_create(char *name) |
| 159 | { |
| 160 | jrawMonitorID stanley; |
| 161 | |
| 162 | stanley = createRawMonitor(name); |
| 163 | return stanley; |
| 164 | } |
| 165 | |
| 166 | static void |
| 167 | lock_destroy(jrawMonitorID stanley) |
| 168 | { |
| 169 | if ( stanley != NULL ) { |
| 170 | destroyRawMonitor(stanley); |
| 171 | } |
| 172 | } |
| 173 | |
| 174 | static void |
| 175 | lock_enter(jrawMonitorID stanley) |
| 176 | { |
| 177 | if ( stanley != NULL ) { |
| 178 | rawMonitorEnter(stanley); |
| 179 | } |
| 180 | } |
| 181 | |
| 182 | static void |
| 183 | lock_exit(jrawMonitorID stanley) |
| 184 | { |
| 185 | if ( stanley != NULL ) { |
| 186 | rawMonitorExit(stanley); |
| 187 | } |
| 188 | } |
| 189 | |
| 190 | static void |
| 191 | get_key(LookupTable *ltable, TableIndex index, void **pkey_ptr, int *pkey_len) |
| 192 | { |
| 193 | *pkey_ptr = ((TableElement*)ELEMENT_PTR(ltable,index))->key.ptr; |
| 194 | *pkey_len = ((TableElement*)ELEMENT_PTR(ltable,index))->key.len; |
| 195 | } |
| 196 | |
| 197 | static void * |
| 198 | get_info(LookupTable *ltable, TableIndex index) |
| 199 | { |
| 200 | TableElement *element; |
| 201 | |
| 202 | if ( ltable->info_size == 0 ) { |
| 203 | return NULL; |
| 204 | } |
| 205 | element = (TableElement*)ELEMENT_PTR(ltable,index); |
| 206 | return element->info; |
| 207 | } |
| 208 | |
/*
 * Unlink element 'index' from its hash bucket chain (storage is not
 * freed). No-op for unhashed tables. Walks the singly-linked chain for
 * the element's bucket until it reaches 'index', splices it out, then
 * clears the element's chain link and saved hash code.
 */
static void
hash_out(LookupTable *ltable, TableIndex index)
{
    if ( ltable->hash_bucket_count > 0 ) {
        TableElement *element;
        TableElement *prev_e;
        TableIndex bucket;
        TableIndex i;

        /* The bucket is recomputed from the element's saved hcode. */
        element = (TableElement*)ELEMENT_PTR(ltable,index);
        bucket = (element->hcode % ltable->hash_bucket_count);
        i = ltable->hash_buckets[bucket];
        HPROF_ASSERT(i!=0);
        prev_e = NULL;
        while ( i != 0 && i != index ) {
            prev_e = (TableElement*)ELEMENT_PTR(ltable,i);
            i = prev_e->next;
        }
        /* The element must be present in its own bucket's chain. */
        HPROF_ASSERT(i==index);
        if ( prev_e == NULL ) {
            /* Element was the chain head. */
            ltable->hash_buckets[bucket] = element->next;
        } else {
            prev_e->next = element->next;
        }
        element->next = 0;
        element->hcode = 0;
    }
}
| 237 | |
| 238 | static jboolean |
| 239 | is_freed_entry(LookupTable *ltable, TableIndex index) |
| 240 | { |
| 241 | if ( ltable->freed_bv == NULL ) { |
| 242 | return JNI_FALSE; |
| 243 | } |
| 244 | if ( ( BV_CHUNK(ltable->freed_bv, index) & BV_CHUNK_MASK(index) ) != 0 ) { |
| 245 | return JNI_TRUE; |
| 246 | } |
| 247 | return JNI_FALSE; |
| 248 | } |
| 249 | |
| 250 | static void |
| 251 | set_freed_bit(LookupTable *ltable, TableIndex index) |
| 252 | { |
| 253 | void *p; |
| 254 | |
| 255 | HPROF_ASSERT(!is_freed_entry(ltable, index)); |
| 256 | p = ltable->freed_bv; |
| 257 | if ( p == NULL ) { |
| 258 | int size; |
| 259 | |
| 260 | /* First time for a free */ |
| 261 | HPROF_ASSERT(ltable->freed_start==0); |
| 262 | HPROF_ASSERT(ltable->freed_start==0); |
| 263 | size = BV_ELEMENT_COUNT(ltable->table_size); |
| 264 | p = HPROF_MALLOC(size*(int)sizeof(BV_CHUNK_TYPE)); |
| 265 | ltable->freed_bv = p; |
| 266 | (void)memset(p, 0, size*(int)sizeof(BV_CHUNK_TYPE)); |
| 267 | } |
| 268 | BV_CHUNK(p, index) |= BV_CHUNK_MASK(index); |
| 269 | ltable->freed_count++; |
| 270 | if ( ltable->freed_count == 1 ) { |
| 271 | /* Set freed_start for first time. */ |
| 272 | HPROF_ASSERT(ltable->freed_start==0); |
| 273 | ltable->freed_start = index; |
| 274 | } else if ( index < ltable->freed_start ) { |
| 275 | /* Set freed_start to smaller value so we can be smart about search */ |
| 276 | HPROF_ASSERT(ltable->freed_start!=0); |
| 277 | ltable->freed_start = index; |
| 278 | } |
| 279 | HPROF_ASSERT(ltable->freed_start!=0); |
| 280 | HPROF_ASSERT(ltable->freed_start < ltable->next_index); |
| 281 | HPROF_ASSERT(is_freed_entry(ltable, index)); |
| 282 | } |
| 283 | |
/*
 * Find and claim a previously-freed element slot, clearing its freed
 * bit and updating freed_count/freed_start. Returns the raw index of
 * the reclaimed slot, or 0 when nothing is marked freed. The scan
 * starts at the chunk containing freed_start, so whole zero chunks at
 * the low end are skipped a byte at a time.
 */
static TableIndex
find_freed_entry(LookupTable *ltable)
{
    if ( ltable->freed_count > 0 ) {
        TableIndex i;
        TableIndex istart;
        void *p;
        BV_CHUNK_TYPE chunk;

        HPROF_ASSERT(BV_CHUNK_BITSIZE==(1<<BV_CHUNK_POWER_2));

        p = ltable->freed_bv;
        HPROF_ASSERT(p!=NULL);

        /* Go to beginning of chunk */
        HPROF_ASSERT(ltable->freed_start!=0);
        HPROF_ASSERT(ltable->freed_start < ltable->next_index);
        istart = BV_CHUNK_ROUND(ltable->freed_start);

        /* Find chunk with any bit set */
        chunk = 0;
        for( ; istart < ltable->next_index ; istart += BV_CHUNK_BITSIZE ) {
            chunk = BV_CHUNK(p, istart);
            if ( chunk != 0 ) {
                break;
            }
        }
        /* freed_count > 0 guarantees some chunk is non-zero. */
        HPROF_ASSERT(chunk!=0);
        HPROF_ASSERT(chunk==BV_CHUNK(p,istart));
        HPROF_ASSERT(istart < ltable->next_index);

        /* Find bit in chunk and return index of freed item */
        for( i = istart ; i < (istart+BV_CHUNK_BITSIZE) ; i++) {
            BV_CHUNK_TYPE mask;

            mask = BV_CHUNK_MASK(i);
            if ( (chunk & mask) != 0 ) {
                HPROF_ASSERT(chunk==BV_CHUNK(p,i));
                /* Clear the bit: this slot is now considered live. */
                chunk &= ~mask;
                BV_CHUNK(p, i) = chunk;
                ltable->freed_count--;
                HPROF_ASSERT(i < ltable->next_index);
                if ( ltable->freed_count > 0 ) {
                    /* Set freed_start so we can be smart about search */
                    HPROF_ASSERT((i+1) < ltable->next_index);
                    ltable->freed_start = i+1;
                } else {
                    /* Clear freed_start because there are no freed entries */
                    ltable->freed_start = 0;
                }
                HPROF_ASSERT(!is_freed_entry(ltable, i));
                return i;
            }
        }
        /* Unreachable: a non-zero chunk must contain a set bit. */
        HPROF_ASSERT(0);
    }
    return 0;
}
| 342 | |
/*
 * Release element 'index': mark it freed in the bit vector and unlink
 * it from its hash bucket chain. The element's key/info storage stays
 * in place and may be recycled by setup_new_entry(). Caller holds the
 * table lock (see table_free_entry()).
 */
static void
free_entry(LookupTable *ltable, TableIndex index)
{
    set_freed_bit(ltable, index);
    hash_out(ltable, index);
}
| 349 | |
| 350 | /* Fairly generic hash code generator (not a hash table index) */ |
| 351 | static HashCode |
| 352 | hashcode(void *key_ptr, int key_len) |
| 353 | { |
| 354 | unsigned char * p; |
| 355 | HashCode hcode; |
| 356 | int i; |
| 357 | |
| 358 | hcode = 0; |
| 359 | if ( key_ptr == NULL || key_len == 0 ) { |
| 360 | return hcode; |
| 361 | } |
| 362 | i = 0; |
| 363 | p = (unsigned char*)key_ptr; |
| 364 | for ( ; i < key_len-3 ; i += 4 ) { |
| 365 | /* Do a little loop unrolling */ |
| 366 | hcode += ( |
| 367 | ( (unsigned)(p[i]) << 24 ) | |
| 368 | ( (unsigned)(p[i+1]) << 16 ) | |
| 369 | ( (unsigned)(p[i+2]) << 8 ) | |
| 370 | ( (unsigned)(p[i+3]) ) |
| 371 | ); |
| 372 | } |
| 373 | for ( ; i < key_len ; i++ ) { |
| 374 | hcode += (unsigned)(p[i]); |
| 375 | } |
| 376 | return hcode; |
| 377 | } |
| 378 | |
| 379 | static void |
| 380 | hash_in(LookupTable *ltable, TableIndex index, HashCode hcode) |
| 381 | { |
| 382 | if ( ltable->hash_bucket_count > 0 ) { |
| 383 | TableElement *element; |
| 384 | TableIndex bucket; |
| 385 | |
| 386 | bucket = (hcode % ltable->hash_bucket_count); |
| 387 | element = (TableElement*)ELEMENT_PTR(ltable, index); |
| 388 | element->hcode = hcode; |
| 389 | element->next = ltable->hash_buckets[bucket]; |
| 390 | ltable->hash_buckets[bucket] = index; |
| 391 | } |
| 392 | } |
| 393 | |
/*
 * Possibly grow the hash bucket array and rehash every chained element.
 * Growth is deliberately throttled: it happens only when the bucket
 * count has fallen below 1/16 of the used entries, on every 10th table
 * resize, and only after enough bucket-chain walking (bucket_walks) has
 * accumulated to suggest the chains are actually hot.
 * Caller holds the table lock (called from resize()).
 */
static void
resize_hash_buckets(LookupTable *ltable)
{
    /* Don't want to do this too often. */

    /* Hash table needs resizing when it's smaller than 1/16 the number of
     * elements used in the table. This is just a guess.
     */
    if ( ( ltable->hash_bucket_count < (ltable->next_index >> 4) )
         && ( ltable->hash_bucket_count > 0 )
         && ( ( ltable->resizes % 10 ) == 0 )
         && ( ltable->bucket_walks > 1000*ltable->hash_bucket_count )
       ) {
        int old_size;
        int new_size;
        TableIndex *new_buckets;
        TableIndex *old_buckets;
        int bucket;

        /* Increase size of hash_buckets array, and rehash all elements */

        LOG3("Table resize", ltable->name, ltable->resizes);

        old_size = ltable->hash_bucket_count;
        old_buckets = ltable->hash_buckets;
        new_size = (ltable->next_index >> 3); /* 1/8 current used count */
        SANITY_CHECK(new_size > old_size);
        new_buckets = HPROF_MALLOC(new_size*(int)sizeof(TableIndex));
        (void)memset(new_buckets, 0, new_size*(int)sizeof(TableIndex));
        /* Swap in the new (empty) buckets first; hash_in() below uses
         * the new count and array.
         */
        ltable->hash_bucket_count = new_size;
        ltable->hash_buckets = new_buckets;

        /* Walk every old chain and re-insert each element. */
        for ( bucket = 0 ; bucket < old_size ; bucket++ ) {
            TableIndex index;

            index = old_buckets[bucket];
            while ( index != 0 ) {
                TableElement *element;
                TableIndex next;

                /* Save the chain link before hash_in() overwrites it. */
                element = (TableElement*)ELEMENT_PTR(ltable, index);
                next = element->next;
                element->next = 0;
                hash_in(ltable, index, element->hcode);
                index = next;
            }
        }
        HPROF_FREE(old_buckets);

        /* Reset the heat counter so the next resize must re-earn it. */
        ltable->bucket_walks = 0;
    }
}
| 446 | |
/*
 * Grow the element array (and the freed bit vector, when one exists).
 * The increment is first bumped to at least max(old_size/4, 512).
 * Existing contents are copied and the new tail is zeroed; the element
 * storage moves, but key/info storage (in Blocks) does not, so pointers
 * into keys/info stay valid. May also grow the hash buckets via
 * resize_hash_buckets(). Caller holds the table lock.
 */
static void
resize(LookupTable *ltable)
{
    int old_size;
    int new_size;
    void *old_table;
    void *new_table;
    int nbytes;
    int obytes;

    LOG3("Table resize", ltable->name, ltable->resizes);

    /* Adjust increment on every resize
     *    Minimum is 1/4 the size of the current table or 512.
     */
    old_size = ltable->table_size;
    if ( ltable->table_incr < (unsigned)(old_size >> 2) ) {
        ltable->table_incr = (old_size >> 2);
    }
    if ( ltable->table_incr < 512 ) {
        ltable->table_incr = 512;
    }
    new_size = old_size + ltable->table_incr;

    /* Basic table element array */
    obytes = old_size * ltable->elem_size;
    nbytes = new_size * ltable->elem_size;
    old_table = ltable->table;
    new_table = HPROF_MALLOC(nbytes);
    (void)memcpy(new_table, old_table, obytes);
    (void)memset(((char*)new_table)+obytes, 0, nbytes-obytes);
    ltable->table = new_table;
    ltable->table_size = new_size;
    HPROF_FREE(old_table);

    /* Then bit vector for freed entries */
    if ( ltable->freed_bv != NULL ) {
        void *old_bv;
        void *new_bv;

        obytes = BV_ELEMENT_COUNT(old_size)*(int)sizeof(BV_CHUNK_TYPE);
        nbytes = BV_ELEMENT_COUNT(new_size)*(int)sizeof(BV_CHUNK_TYPE);
        old_bv = ltable->freed_bv;
        new_bv = HPROF_MALLOC(nbytes);
        (void)memcpy(new_bv, old_bv, obytes);
        (void)memset(((char*)new_bv)+obytes, 0, nbytes-obytes);
        ltable->freed_bv = new_bv;
        HPROF_FREE(old_bv);
    }

    /* Check to see if the hash table needs resizing */
    resize_hash_buckets(ltable);

    ltable->resizes++;
}
| 502 | |
| 503 | static jboolean |
| 504 | keys_equal(void *key_ptr1, void *key_ptr2, int key_len) |
| 505 | { |
| 506 | unsigned char * p1; |
| 507 | unsigned char * p2; |
| 508 | int i; |
| 509 | |
| 510 | if ( key_len == 0 ) { |
| 511 | return JNI_TRUE; |
| 512 | } |
| 513 | |
| 514 | /* We know these are aligned because we malloc'd them. */ |
| 515 | |
| 516 | /* Compare word by word, then byte by byte */ |
| 517 | p1 = (unsigned char*)key_ptr1; |
| 518 | p2 = (unsigned char*)key_ptr2; |
| 519 | for ( i = 0 ; i < key_len-3 ; i += 4 ) { |
| 520 | /*LINTED*/ |
| 521 | if ( *(unsigned*)(p1+i) != *(unsigned*)(p2+i) ) { |
| 522 | return JNI_FALSE; |
| 523 | } |
| 524 | } |
| 525 | for ( ; i < key_len ; i++ ) { |
| 526 | if ( p1[i] != p2[i] ) { |
| 527 | return JNI_FALSE; |
| 528 | } |
| 529 | } |
| 530 | return JNI_TRUE; |
| 531 | } |
| 532 | |
/*
 * Look up an element by key. Returns the raw table index, or 0 when not
 * found (or when the table has no hash buckets). The cheap checks come
 * first — full hash code, then key length — and only then the byte-wise
 * key compare. A hit is moved to the head of its bucket chain so that
 * frequently looked-up keys stay cheap. Caller holds the table lock.
 */
static TableIndex
find_entry(LookupTable *ltable, void *key_ptr, int key_len, HashCode hcode)
{
    TableIndex index;

    HPROF_ASSERT(ltable!=NULL);

    index = 0;
    if ( ltable->hash_bucket_count > 0 ) {
        TableIndex bucket;
        TableIndex prev_index;

        HPROF_ASSERT(key_ptr!=NULL);
        HPROF_ASSERT(key_len>0);
        prev_index = 0;
        bucket = (hcode % ltable->hash_bucket_count);
        index = ltable->hash_buckets[bucket];
        while ( index != 0 ) {
            TableElement *element;
            TableElement *prev_element;

            element = (TableElement*)ELEMENT_PTR(ltable, index);
            if ( hcode == element->hcode &&
                 key_len == element->key.len &&
                 keys_equal(key_ptr, element->key.ptr, key_len) ) {
                /* Place this guy at the head of the bucket list */
                if ( prev_index != 0 ) {
                    prev_element = (TableElement*)ELEMENT_PTR(ltable, prev_index);
                    prev_element->next = element->next;
                    element->next = ltable->hash_buckets[bucket];
                    ltable->hash_buckets[bucket] = index;
                }
                break;
            }
            prev_index = index;
            index = element->next;
            /* bucket_walks feeds the resize_hash_buckets() heuristic. */
            ltable->bucket_walks++;
        }
    }
    return index;
}
| 574 | |
/*
 * Obtain a slot for a new element and fill in its key and info.
 * A freed slot is recycled when one exists (re-using its key space if
 * it is big enough to hold the new key); otherwise the next index is
 * taken, growing the table via resize() when full. Key bytes and info
 * (if any) are copied into Blocks storage that never moves.
 * Returns the raw (hare-less) index. Caller holds the table lock.
 */
static TableIndex
setup_new_entry(LookupTable *ltable, void *key_ptr, int key_len, void *info_ptr)
{
    TableIndex index;
    TableElement *element;
    void *info;
    void *dup_key;

    /* Assume we need new allocations for key and info */
    dup_key = NULL;
    info = NULL;

    /* Look for a freed element */
    index = 0;
    if ( ltable->freed_count > 0 ) {
        index = find_freed_entry(ltable);
    }
    if ( index != 0 ) {
        int old_key_len;

        /* Found a freed element, re-use what we can but clean it up. */
        element = (TableElement*)ELEMENT_PTR(ltable, index);
        dup_key = element->key.ptr;
        old_key_len = element->key.len;
        info = element->info;
        (void)memset(element, 0, ltable->elem_size);

        /* Toss the key space if size is too small to hold new key */
        if ( key_ptr != NULL ) {
            if ( old_key_len < key_len ) {
                /* This could leak space in the Blocks if keys are variable
                 *    in size AND the table does frees of elements.
                 */
                dup_key = NULL;
            }
        }
    } else {

        /* Brand new table element */
        if ( ltable->next_index >= ltable->table_size ) {
            resize(ltable);
        }
        index = ltable->next_index++;
        element = (TableElement*)ELEMENT_PTR(ltable, index);
    }

    /* Setup info area: zero it when the caller supplied nothing. */
    if ( ltable->info_size > 0 ) {
        if ( info == NULL ) {
            info = blocks_alloc(ltable->info_blocks, ltable->info_size);
        }
        if ( info_ptr==NULL ) {
            (void)memset(info, 0, ltable->info_size);
        } else {
            (void)memcpy(info, info_ptr, ltable->info_size);
        }
    }

    /* Setup key area if one was provided */
    if ( key_ptr != NULL ) {
        if ( dup_key == NULL ) {
            dup_key = blocks_alloc(ltable->key_blocks, key_len);
        }
        (void)memcpy(dup_key, key_ptr, key_len);
    }

    /* Fill in element */
    element->key.ptr = dup_key;
    element->key.len = key_len;
    element->info = info;

    return index;
}
| 648 | |
| 649 | LookupTable * |
| 650 | table_initialize(const char *name, int size, int incr, int bucket_count, |
| 651 | int info_size) |
| 652 | { |
| 653 | LookupTable * ltable; |
| 654 | char lock_name[80]; |
| 655 | int elem_size; |
| 656 | int key_size; |
| 657 | |
| 658 | HPROF_ASSERT(name!=NULL); |
| 659 | HPROF_ASSERT(size>0); |
| 660 | HPROF_ASSERT(incr>0); |
| 661 | HPROF_ASSERT(bucket_count>=0); |
| 662 | HPROF_ASSERT(info_size>=0); |
| 663 | |
| 664 | key_size = 1; |
| 665 | ltable = (LookupTable *)HPROF_MALLOC((int)sizeof(LookupTable)); |
| 666 | (void)memset(ltable, 0, (int)sizeof(LookupTable)); |
| 667 | |
| 668 | (void)strncpy(ltable->name, name, sizeof(ltable->name)); |
| 669 | |
| 670 | elem_size = (int)sizeof(TableElement); |
| 671 | |
| 672 | ltable->next_index = 1; /* Never use index 0 */ |
| 673 | ltable->table_size = size; |
| 674 | ltable->table_incr = incr; |
| 675 | ltable->hash_bucket_count = bucket_count; |
| 676 | ltable->elem_size = elem_size; |
| 677 | ltable->info_size = info_size; |
| 678 | if ( info_size > 0 ) { |
| 679 | ltable->info_blocks = blocks_init(8, info_size, incr); |
| 680 | } |
| 681 | if ( key_size > 0 ) { |
| 682 | ltable->key_blocks = blocks_init(8, key_size, incr); |
| 683 | } |
| 684 | ltable->table = HPROF_MALLOC(size * elem_size); |
| 685 | (void)memset(ltable->table, 0, size * elem_size); |
| 686 | if ( bucket_count > 0 ) { |
| 687 | int nbytes; |
| 688 | |
| 689 | nbytes = (int)(bucket_count*sizeof(TableIndex)); |
| 690 | ltable->hash_buckets = (TableIndex*)HPROF_MALLOC(nbytes); |
| 691 | (void)memset(ltable->hash_buckets, 0, nbytes); |
| 692 | } |
| 693 | |
| 694 | (void)md_snprintf(lock_name, sizeof(lock_name), |
| 695 | "HPROF %s table lock", name); |
| 696 | lock_name[sizeof(lock_name)-1] = 0; |
| 697 | ltable->lock = lock_create(lock_name); |
| 698 | ltable->serial_num = gdata->table_serial_number_counter++; |
| 699 | ltable->hare = (ltable->serial_num << 28); |
| 700 | |
| 701 | LOG3("Table initialized", ltable->name, ltable->table_size); |
| 702 | return ltable; |
| 703 | } |
| 704 | |
| 705 | int |
| 706 | table_element_count(LookupTable *ltable) |
| 707 | { |
| 708 | int nelems; |
| 709 | |
| 710 | HPROF_ASSERT(ltable!=NULL); |
| 711 | |
| 712 | lock_enter(ltable->lock); { |
| 713 | nelems = ltable->next_index-1; |
| 714 | } lock_exit(ltable->lock); |
| 715 | |
| 716 | return nelems; |
| 717 | } |
| 718 | |
| 719 | void |
| 720 | table_free_entry(LookupTable *ltable, TableIndex index) |
| 721 | { |
| 722 | HPROF_ASSERT(ltable!=NULL); |
| 723 | SANITY_CHECK_HARE(index, ltable->hare); |
| 724 | index = SANITY_REMOVE_HARE(index); |
| 725 | SANITY_CHECK_INDEX(ltable, index); |
| 726 | |
| 727 | lock_enter(ltable->lock); { |
| 728 | HPROF_ASSERT(!is_freed_entry(ltable, index)); |
| 729 | free_entry(ltable, index); |
| 730 | } lock_exit(ltable->lock); |
| 731 | } |
| 732 | |
/*
 * Call 'func' once for every live (non-freed) element, passing the
 * hare-tagged index, the key pointer/length, the info pointer, and
 * 'arg'. The iterator is allowed to free the entry it was handed; the
 * freed total is re-tallied during the walk and checked against
 * freed_count at the end. The whole walk runs with the table lock held.
 */
void
table_walk_items(LookupTable *ltable, LookupTableIterator func, void* arg)
{
    if ( ltable == NULL || ltable->next_index <= 1 ) {
        return;
    }
    HPROF_ASSERT(func!=NULL);

    lock_enter(ltable->lock); {
        TableIndex index;
        int        fcount;

        LOG3("table_walk_items() count+free", ltable->name, ltable->next_index);
        fcount = 0;
        for ( index = 1 ; index < ltable->next_index ; index++ ) {
            if ( ! is_freed_entry(ltable, index) ) {
                void *key_ptr;
                int   key_len;
                void *info;

                get_key(ltable, index, &key_ptr, &key_len);
                info = get_info(ltable, index);
                (*func)(SANITY_ADD_HARE(index, ltable->hare), key_ptr, key_len, info, arg);
                /* The iterator may have freed this very entry. */
                if ( is_freed_entry(ltable, index) ) {
                    fcount++;
                }
            } else {
                fcount++;
            }
        }
        LOG3("table_walk_items() count-free", ltable->name, ltable->next_index);
        HPROF_ASSERT(fcount==ltable->freed_count);
    } lock_exit(ltable->lock);
}
| 767 | |
/*
 * Destroy a table. When 'func' is non-NULL it is first applied to all
 * live items (via table_walk_items) so the caller can release per-item
 * resources. Then, under the lock, the element array, hash buckets,
 * freed bit vector, and info/key Blocks are released; the lock itself
 * is destroyed last, after being released. 'ltable' must not be used
 * after this call.
 */
void
table_cleanup(LookupTable *ltable, LookupTableIterator func, void *arg)
{
    if ( ltable == NULL ) {
        return;
    }

    if ( func != NULL ) {
        table_walk_items(ltable, func, arg);
    }

    lock_enter(ltable->lock); {

        HPROF_FREE(ltable->table);
        if ( ltable->hash_buckets != NULL ) {
            HPROF_FREE(ltable->hash_buckets);
        }
        if ( ltable->freed_bv != NULL ) {
            HPROF_FREE(ltable->freed_bv);
        }
        if ( ltable->info_blocks != NULL ) {
            blocks_term(ltable->info_blocks);
            ltable->info_blocks = NULL;
        }
        if ( ltable->key_blocks != NULL ) {
            blocks_term(ltable->key_blocks);
            ltable->key_blocks = NULL;
        }

    } lock_exit(ltable->lock);

    /* Cannot destroy the lock while holding it. */
    lock_destroy(ltable->lock);
    ltable->lock = NULL;

    HPROF_FREE(ltable);
    ltable = NULL;
}
| 805 | |
| 806 | TableIndex |
| 807 | table_create_entry(LookupTable *ltable, void *key_ptr, int key_len, void *info_ptr) |
| 808 | { |
| 809 | TableIndex index; |
| 810 | HashCode hcode; |
| 811 | |
| 812 | HPROF_ASSERT(ltable!=NULL); |
| 813 | |
| 814 | /* Create hash code if needed */ |
| 815 | hcode = 0; |
| 816 | if ( ltable->hash_bucket_count > 0 ) { |
| 817 | hcode = hashcode(key_ptr, key_len); |
| 818 | } |
| 819 | |
| 820 | /* Create a new entry */ |
| 821 | lock_enter(ltable->lock); { |
| 822 | |
| 823 | /* Need to create a new entry */ |
| 824 | index = setup_new_entry(ltable, key_ptr, key_len, info_ptr); |
| 825 | |
| 826 | /* Add to hash table if we have one */ |
| 827 | if ( ltable->hash_bucket_count > 0 ) { |
| 828 | hash_in(ltable, index, hcode); |
| 829 | } |
| 830 | |
| 831 | } lock_exit(ltable->lock); |
| 832 | return SANITY_ADD_HARE(index, ltable->hare); |
| 833 | } |
| 834 | |
| 835 | TableIndex |
| 836 | table_find_entry(LookupTable *ltable, void *key_ptr, int key_len) |
| 837 | { |
| 838 | TableIndex index; |
| 839 | HashCode hcode; |
| 840 | |
| 841 | /* Create hash code if needed */ |
| 842 | hcode = 0; |
| 843 | if ( ltable->hash_bucket_count > 0 ) { |
| 844 | hcode = hashcode(key_ptr, key_len); |
| 845 | } |
| 846 | |
| 847 | /* Look for element */ |
| 848 | lock_enter(ltable->lock); { |
| 849 | index = find_entry(ltable, key_ptr, key_len, hcode); |
| 850 | } lock_exit(ltable->lock); |
| 851 | |
| 852 | return index==0 ? index : SANITY_ADD_HARE(index, ltable->hare); |
| 853 | } |
| 854 | |
| 855 | TableIndex |
| 856 | table_find_or_create_entry(LookupTable *ltable, void *key_ptr, int key_len, |
| 857 | jboolean *pnew_entry, void *info_ptr) |
| 858 | { |
| 859 | TableIndex index; |
| 860 | HashCode hcode; |
| 861 | |
| 862 | /* Assume it is NOT a new entry for now */ |
| 863 | if ( pnew_entry ) { |
| 864 | *pnew_entry = JNI_FALSE; |
| 865 | } |
| 866 | |
| 867 | /* Create hash code if needed */ |
| 868 | hcode = 0; |
| 869 | if ( ltable->hash_bucket_count > 0 ) { |
| 870 | hcode = hashcode(key_ptr, key_len); |
| 871 | } |
| 872 | |
| 873 | /* Look for element */ |
| 874 | index = 0; |
| 875 | lock_enter(ltable->lock); { |
| 876 | if ( ltable->hash_bucket_count > 0 ) { |
| 877 | index = find_entry(ltable, key_ptr, key_len, hcode); |
| 878 | } |
| 879 | if ( index == 0 ) { |
| 880 | |
| 881 | /* Need to create a new entry */ |
| 882 | index = setup_new_entry(ltable, key_ptr, key_len, info_ptr); |
| 883 | |
| 884 | /* Add to hash table if we have one */ |
| 885 | if ( ltable->hash_bucket_count > 0 ) { |
| 886 | hash_in(ltable, index, hcode); |
| 887 | } |
| 888 | |
| 889 | if ( pnew_entry ) { |
| 890 | *pnew_entry = JNI_TRUE; |
| 891 | } |
| 892 | } |
| 893 | } lock_exit(ltable->lock); |
| 894 | |
| 895 | return SANITY_ADD_HARE(index, ltable->hare); |
| 896 | } |
| 897 | |
| 898 | void * |
| 899 | table_get_info(LookupTable *ltable, TableIndex index) |
| 900 | { |
| 901 | void *info; |
| 902 | |
| 903 | HPROF_ASSERT(ltable!=NULL); |
| 904 | HPROF_ASSERT(ltable->info_size > 0); |
| 905 | SANITY_CHECK_HARE(index, ltable->hare); |
| 906 | index = SANITY_REMOVE_HARE(index); |
| 907 | SANITY_CHECK_INDEX(ltable, index); |
| 908 | |
| 909 | lock_enter(ltable->lock); { |
| 910 | HPROF_ASSERT(!is_freed_entry(ltable, index)); |
| 911 | info = get_info(ltable,index); |
| 912 | } lock_exit(ltable->lock); |
| 913 | |
| 914 | return info; |
| 915 | } |
| 916 | |
| 917 | void |
| 918 | table_get_key(LookupTable *ltable, TableIndex index, void **pkey_ptr, int *pkey_len) |
| 919 | { |
| 920 | HPROF_ASSERT(ltable!=NULL); |
| 921 | HPROF_ASSERT(pkey_ptr!=NULL); |
| 922 | HPROF_ASSERT(pkey_len!=NULL); |
| 923 | SANITY_CHECK_HARE(index, ltable->hare); |
| 924 | HPROF_ASSERT(ltable->elem_size!=0); |
| 925 | index = SANITY_REMOVE_HARE(index); |
| 926 | SANITY_CHECK_INDEX(ltable, index); |
| 927 | |
| 928 | lock_enter(ltable->lock); { |
| 929 | HPROF_ASSERT(!is_freed_entry(ltable, index)); |
| 930 | get_key(ltable, index, pkey_ptr, pkey_len); |
| 931 | } lock_exit(ltable->lock); |
| 932 | } |
| 933 | |
/*
 * Externally grab the table's lock so a caller can make a sequence of
 * table calls atomically; must be paired with table_lock_exit().
 */
void
table_lock_enter(LookupTable *ltable)
{
    lock_enter(ltable->lock);
}
| 939 | |
/*
 * Release the table's lock previously taken with table_lock_enter().
 */
void
table_lock_exit(LookupTable *ltable)
{
    lock_exit(ltable->lock);
}