sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 1 | |
| 2 | /*--------------------------------------------------------------------*/ |
| 3 | /*--- Management of the translation table and cache. ---*/ |
| 4 | /*--- vg_transtab.c ---*/ |
| 5 | /*--------------------------------------------------------------------*/ |
| 6 | |
| 7 | /* |
| 8 | This file is part of Valgrind, an x86 protected-mode emulator |
| 9 | designed for debugging and profiling binaries on x86-Unixes. |
| 10 | |
| 11 | Copyright (C) 2000-2002 Julian Seward |
| 12 | jseward@acm.org |
| 13 | Julian_Seward@muraroa.demon.co.uk |
| 14 | |
| 15 | This program is free software; you can redistribute it and/or |
| 16 | modify it under the terms of the GNU General Public License as |
| 17 | published by the Free Software Foundation; either version 2 of the |
| 18 | License, or (at your option) any later version. |
| 19 | |
| 20 | This program is distributed in the hope that it will be useful, but |
| 21 | WITHOUT ANY WARRANTY; without even the implied warranty of |
| 22 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 23 | General Public License for more details. |
| 24 | |
| 25 | You should have received a copy of the GNU General Public License |
| 26 | along with this program; if not, write to the Free Software |
| 27 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA |
| 28 | 02111-1307, USA. |
| 29 | |
| 30 | The GNU General Public License is contained in the file LICENSE. |
| 31 | */ |
| 32 | |
| 33 | #include "vg_include.h" |
| 34 | #include "vg_constants.h" |
| 35 | |
| 36 | |
| 37 | /*------------------------------------------------------------*/ |
| 38 | /*--- Management of the LRU-based translation table+cache. ---*/ |
| 39 | /*------------------------------------------------------------*/ |
| 40 | |
/* These sizes were set up so as to be able to debug large KDE 3
   applications (are there any small ones?) without excessive amounts
   of code retranslation.  */

/* Size of the translation cache, in bytes. */
#define VG_TC_SIZE 16000000

/* Do a LRU pass when the translation cache becomes this full. */
#define VG_TC_LIMIT_PERCENT 95

/* When doing an LRU pass, reduce TC fullness to this level. */
#define VG_TC_TARGET_PERCENT 85

/* Number of entries in the translation table.  This must be a prime
   number in order to make the hashing work properly.  (Previously
   tried, smaller sizes are kept in the comments for reference.) */
#define VG_TT_SIZE /*19997*/ /*29989*/ /*50497*/ /*75083*/ 100129

/* Do an LRU pass when the translation table becomes this full. */
#define VG_TT_LIMIT_PERCENT /*67*/ 80

/* When doing an LRU pass, reduce TT fullness to this level. */
#define VG_TT_TARGET_PERCENT /*60*/ 70

/* The number of age steps we track.  0 means the current epoch,
   N_EPOCHS-1 means used the epoch N_EPOCHS-1 or more ago. */
#define VG_N_EPOCHS /*2000*/ /*4000*/ 20000

/* Sentinel values stored in a TT entry's .orig_addr field; neither
   can ever be a genuine code address. */
/* This TT entry is empty. */
#define VG_TTE_EMPTY   ((Addr)1)
/* This TT entry has been deleted. */
#define VG_TTE_DELETED ((Addr)3)

/* The TC.  This used to be statically allocated, but that forces many
   SecMap arrays to be pointlessly allocated at startup, bloating the
   process size by about 22M and making startup slow.  So now we
   dynamically allocate it at startup time.
   was: static UChar vg_tc[VG_TC_SIZE];
*/
static UChar* vg_tc = NULL;

/* Count of bytes used in the TC. */
static Int vg_tc_used = 0;

/* The TT.  Like TC, for the same reason, is dynamically allocated at
   startup.
   was: static TTEntry vg_tt[VG_TT_SIZE];
*/
static TTEntry* vg_tt = NULL;

/* Count of non-empty, non-deleted TT entries. */
static Int vg_tt_used = 0;

/* Fast helper for the TT.  A direct-mapped cache which holds a
   pointer to a TT entry which may or may not be the correct one, but
   which we hope usually is.  This array is referred to directly from
   vg_dispatch.S. */
Addr VG_(tt_fast)[VG_TT_FAST_SIZE];

/* For reading/writing the misaligned TT-index word at immediately
   preceding every translation in TC.  Plain UInt* dereferences;
   relies on the target tolerating misaligned word accesses. */
#define VG_READ_MISALIGNED_WORD(aaa) (*((UInt*)(aaa)))
#define VG_WRITE_MISALIGNED_WORD(aaa,vvv) *((UInt*)(aaa)) = ((UInt)(vvv))

/* Used for figuring out an age threshold for translations: per-age
   bucket counts of TC bytes and TT entries, rebuilt on each LRU
   pass by VG_(maybe_do_lru_pass). */
static Int vg_bytes_in_epoch[VG_N_EPOCHS];
static Int vg_entries_in_epoch[VG_N_EPOCHS];
| 107 | |
| 108 | |
| 109 | /* Just so these counts can be queried without making them globally |
| 110 | visible. */ |
| 111 | void VG_(get_tt_tc_used) ( UInt* tt_used, UInt* tc_used ) |
| 112 | { |
| 113 | *tt_used = vg_tt_used; |
| 114 | *tc_used = vg_tc_used; |
| 115 | } |
| 116 | |
| 117 | |
/* Do the LRU thing on TT/TC, clearing them back to the target limits
   if they are over the threshold limits.

   Outline: (1) bucket all live translations by age (epochs since
   last use); (2) find the smallest age cutoff whose retained set
   fits within both targets; (3) mark everything at least that old
   as deleted in the TT; (4) compact the TC in place so the
   survivors are contiguous; (5) invalidate tt_fast, since
   compaction moves code around. */
void VG_(maybe_do_lru_pass) ( void )
{
   Int i, j, r, w, thresh, ttno;
   TTEntry* tte;

   const Int tc_limit  = (Int)((VG_TC_SIZE * VG_TC_LIMIT_PERCENT) / 100.0);
   const Int tt_limit  = (Int)((VG_TT_SIZE * VG_TT_LIMIT_PERCENT) / 100.0);
   const Int tc_target = (Int)((VG_TC_SIZE * VG_TC_TARGET_PERCENT) / 100.0);
   const Int tt_target = (Int)((VG_TT_SIZE * VG_TT_TARGET_PERCENT) / 100.0);

   /* Decide quickly if we need to do an LRU pass ? */
   if (vg_tc_used <= tc_limit && vg_tt_used <= tt_limit)
      return;

   VGP_PUSHCC(VgpDoLRU);
   /*
   VG_(printf)(
      "limits: tc_limit %d, tt_limit %d, tc_target %d, tt_target %d\n",
      tc_limit, tt_limit, tc_target, tt_target);
   */

   if (VG_(clo_verbosity) > 2)
      VG_(printf)(" pre-LRU: tc %d (target %d), tt %d (target %d)\n",
                  vg_tc_used, tc_target, vg_tt_used, tt_target);

   /* Yes we do.  Figure out what threshold age is required in order to
      shrink both the TC and TT occupancy below TC_TARGET_PERCENT and
      TT_TARGET_PERCENT respectively. */

   VG_(number_of_lrus)++;

   /* Count the number of TC bytes and TT entries in each epoch. */
   for (i = 0; i < VG_N_EPOCHS; i++)
      vg_bytes_in_epoch[i] = vg_entries_in_epoch[i] = 0;

   for (i = 0; i < VG_TT_SIZE; i++) {
      if (vg_tt[i].orig_addr == VG_TTE_EMPTY ||
          vg_tt[i].orig_addr == VG_TTE_DELETED) continue;
      j = vg_tt[i].mru_epoch;
      vg_assert(j <= VG_(current_epoch));
      j = VG_(current_epoch) - j;
      /* Ages beyond the tracked range all fall into the last bucket. */
      if (j >= VG_N_EPOCHS) j = VG_N_EPOCHS-1;
      vg_assert(0 <= j && j < VG_N_EPOCHS);
      /* Greater j now means older. */
      vg_entries_in_epoch[j]++;
      /* The +4 accounts for the TT-index word preceding each
         translation in TC. */
      vg_bytes_in_epoch[j] += 4+vg_tt[i].trans_size;
   }

   /*
   for (i = 0; i < VG_N_EPOCHS; i++)
      VG_(printf)("epoch %d: ents %d, bytes %d\n",
                  i, vg_entries_in_epoch[i], vg_bytes_in_epoch[i]);
   */

   /* Accumulate.  Make vg_{bytes,entries}_in_epoch[n] contain the
      counts for itself and all younger epochs. */
   for (i = 1; i < VG_N_EPOCHS; i++) {
      vg_entries_in_epoch[i] += vg_entries_in_epoch[i-1];
      vg_bytes_in_epoch[i] += vg_bytes_in_epoch[i-1];
   }

   /* Smallest age at which a cumulative total first exceeds a
      target: retaining only strictly-younger entries then fits both
      targets. */
   for (thresh = 0; thresh < VG_N_EPOCHS; thresh++) {
      if (vg_entries_in_epoch[thresh] > tt_target
          || vg_bytes_in_epoch[thresh] >= tc_target)
         break;
   }

   if (VG_(clo_verbosity) > 2)
      VG_(printf)(
         " LRU: discard translations %d or more epochs since last use\n",
         thresh
      );

   /* Convert thresh from an age (epochs ago) into an absolute epoch
      number; anything last used at or before it gets discarded. */
   thresh = VG_(current_epoch) - thresh;

   /* Ok, so we will hit our targets if we retain all entries most
      recently used at most thresh epochs ago.  Traverse the TT and
      mark such entries as deleted. */
   for (i = 0; i < VG_TT_SIZE; i++) {
      if (vg_tt[i].orig_addr == VG_TTE_EMPTY ||
          vg_tt[i].orig_addr == VG_TTE_DELETED) continue;
      if (vg_tt[i].mru_epoch <= thresh) {
         vg_tt[i].orig_addr = VG_TTE_DELETED;
         vg_tt_used--;
         /* Maintain both the per-epoch and the overall discard
            statistics.  (The entry's size fields remain valid after
            .orig_addr is overwritten with the sentinel.) */
         VG_(this_epoch_out_count) ++;
         VG_(this_epoch_out_osize) += vg_tt[i].orig_size;
         VG_(this_epoch_out_tsize) += vg_tt[i].trans_size;
         VG_(overall_out_count) ++;
         VG_(overall_out_osize) += vg_tt[i].orig_size;
         VG_(overall_out_tsize) += vg_tt[i].trans_size;
      }
   }

   vg_assert(vg_tt_used >= 0);
   vg_assert(vg_tt_used <= tt_target);

   /* Now compact the TC, sliding live entries downwards to fill spaces
      left by deleted entries.  In this loop, r is the offset in TC of
      the current translation under consideration, and w is the next
      allocation point. */
   r = w = 0;
   while (True) {
      if (r >= vg_tc_used) break;
      /* The first four bytes of every translation contain the index
         of its TT entry.  The TT entry's .trans_addr field points at
         the start of the code proper, not at this 4-byte index, so
         that we don't constantly have to keep adding 4 in the main
         lookup/dispatch loop. */
      ttno = VG_READ_MISALIGNED_WORD(&vg_tc[r]);
      vg_assert(ttno >= 0 && ttno < VG_TT_SIZE);
      tte = & vg_tt[ ttno ];
      vg_assert(tte->orig_addr != VG_TTE_EMPTY);
      if (tte->orig_addr != VG_TTE_DELETED) {
         /* We want to keep this one alive. */
         /* Sanity check the pointer back to TC. */
         vg_assert(tte->trans_addr == (Addr)&vg_tc[r+4]);
         /* Copy index word + code down to the write point, and
            re-aim .trans_addr at the code's new home. */
         for (i = 0; i < 4+tte->trans_size; i++)
            vg_tc[w+i] = vg_tc[r+i];
         tte->trans_addr = (Addr)&vg_tc[w+4];
         w += 4+tte->trans_size;
      }
      r += 4+tte->trans_size;
   }
   /* should have traversed an exact number of translations, with no
      slop at the end. */
   vg_assert(w <= r);
   vg_assert(r == vg_tc_used);
   vg_assert(w <= r);
   vg_assert(w <= tc_target);
   vg_tc_used = w;

   /* Invalidate the fast cache, since it is now out of date.  It will get
      reconstructed incrementally when the client resumes. */
   VG_(invalidate_tt_fast)();

   if (VG_(clo_verbosity) > 2)
      VG_(printf)("post-LRU: tc %d (target %d), tt %d (target %d)\n",
                  vg_tc_used, tc_target, vg_tt_used, tt_target);

   if (VG_(clo_verbosity) > 1)
      VG_(message)(Vg_UserMsg,
         "epoch %d (bb %luk): thresh %d, "
         "out %d (%dk -> %dk), new TT %d, TC %dk",
         VG_(current_epoch),
         VG_(bbs_done) / 1000,
         VG_(current_epoch) - thresh,
         VG_(this_epoch_out_count),
         VG_(this_epoch_out_osize) / 1000,
         VG_(this_epoch_out_tsize) / 1000,
         vg_tt_used, vg_tc_used / 1000
      );

   /* Reconstruct the SMC detection structures. */

   VGP_POPCC;
}
| 277 | |
| 278 | |
| 279 | /* Do a sanity check on TT/TC. |
| 280 | */ |
| 281 | void VG_(sanity_check_tc_tt) ( void ) |
| 282 | { |
| 283 | Int i, counted_entries, counted_bytes; |
| 284 | TTEntry* tte; |
| 285 | counted_entries = 0; |
| 286 | counted_bytes = 0; |
| 287 | for (i = 0; i < VG_TT_SIZE; i++) { |
| 288 | tte = &vg_tt[i]; |
| 289 | if (tte->orig_addr == VG_TTE_EMPTY) continue; |
| 290 | if (tte->orig_addr == VG_TTE_DELETED) continue; |
| 291 | vg_assert(tte->mru_epoch >= 0); |
| 292 | vg_assert(tte->mru_epoch <= VG_(current_epoch)); |
| 293 | counted_entries++; |
| 294 | counted_bytes += 4+tte->trans_size; |
| 295 | vg_assert(tte->trans_addr >= (Addr)&vg_tc[4]); |
| 296 | vg_assert(tte->trans_addr < (Addr)&vg_tc[vg_tc_used]); |
| 297 | vg_assert(VG_READ_MISALIGNED_WORD(tte->trans_addr-4) == i); |
| 298 | } |
| 299 | vg_assert(counted_entries == vg_tt_used); |
| 300 | vg_assert(counted_bytes == vg_tc_used); |
| 301 | } |
| 302 | |
| 303 | |
| 304 | /* Add this already-filled-in entry to the TT. Assumes that the |
| 305 | relevant code chunk has been placed in TC, along with a dummy back |
| 306 | pointer, which is inserted here. |
| 307 | */ |
| 308 | extern void VG_(add_to_trans_tab) ( TTEntry* tte ) |
| 309 | { |
| 310 | Int i; |
| 311 | /* |
| 312 | VG_(printf)("add_to_trans_tab(%d) %x %d %x %d\n", |
| 313 | vg_tt_used, tte->orig_addr, tte->orig_size, |
| 314 | tte->trans_addr, tte->trans_size); |
| 315 | */ |
| 316 | vg_assert(tte->orig_addr != VG_TTE_DELETED |
| 317 | && tte->orig_addr != VG_TTE_EMPTY); |
| 318 | /* Hash to get initial probe point. */ |
| 319 | i = ((UInt)(tte->orig_addr)) % VG_TT_SIZE; |
| 320 | while (True) { |
| 321 | if (vg_tt[i].orig_addr == tte->orig_addr) |
| 322 | VG_(panic)("add_to_trans_tab: duplicate"); |
| 323 | if (vg_tt[i].orig_addr == VG_TTE_DELETED || |
| 324 | vg_tt[i].orig_addr == VG_TTE_EMPTY) { |
| 325 | /* Put it here, and set the back pointer. */ |
| 326 | vg_tt[i] = *tte; |
| 327 | VG_WRITE_MISALIGNED_WORD(tte->trans_addr-4, i); |
| 328 | vg_tt_used++; |
| 329 | return; |
| 330 | } |
| 331 | i++; |
| 332 | if (i == VG_TT_SIZE) i = 0; |
| 333 | } |
| 334 | } |
| 335 | |
| 336 | |
| 337 | /* Copy a new translation's code into TC, leaving a 4-byte hole for |
| 338 | the back pointer, and returning a pointer to the code proper (not |
| 339 | the hole) in TC. |
| 340 | */ |
| 341 | Addr VG_(copy_to_transcache) ( Addr trans_addr, Int trans_size ) |
| 342 | { |
| 343 | Int i; |
| 344 | Addr ret_addr; |
| 345 | if (4+trans_size > VG_TC_SIZE-vg_tc_used) |
| 346 | VG_(panic)("copy_to_transcache: not enough free space?!"); |
| 347 | /* Leave a hole for the back pointer to the TT entry. */ |
| 348 | vg_tc_used += 4; |
| 349 | ret_addr = (Addr)&vg_tc[vg_tc_used]; |
| 350 | for (i = 0; i < trans_size; i++) |
| 351 | vg_tc[vg_tc_used+i] = ((UChar*)trans_addr)[i]; |
| 352 | vg_tc_used += trans_size; |
| 353 | return ret_addr; |
| 354 | } |
| 355 | |
| 356 | |
| 357 | /* Invalidate the tt_fast cache, for whatever reason. Tricky. We |
| 358 | have to find a TTE_EMPTY slot to point all entries at. */ |
| 359 | void VG_(invalidate_tt_fast)( void ) |
| 360 | { |
| 361 | Int i, j; |
| 362 | for (i = 0; i < VG_TT_SIZE && vg_tt[i].orig_addr != VG_TTE_EMPTY; i++) |
| 363 | ; |
| 364 | vg_assert(i < VG_TT_SIZE |
| 365 | && vg_tt[i].orig_addr == VG_TTE_EMPTY); |
| 366 | for (j = 0; j < VG_TT_FAST_SIZE; j++) |
| 367 | VG_(tt_fast)[j] = (Addr)&vg_tt[i]; |
| 368 | } |
| 369 | |
| 370 | |
| 371 | /* Search TT to find the translated address of the supplied original, |
| 372 | or NULL if not found. This routine is used when we miss in |
| 373 | VG_(tt_fast). |
| 374 | */ |
| 375 | static __inline__ TTEntry* search_trans_table ( Addr orig_addr ) |
| 376 | { |
| 377 | //static Int queries = 0; |
| 378 | //static Int probes = 0; |
| 379 | Int i; |
| 380 | /* Hash to get initial probe point. */ |
| 381 | // if (queries == 10000) { |
| 382 | // VG_(printf)("%d queries, %d probes\n", queries, probes); |
| 383 | // queries = probes = 0; |
| 384 | //} |
| 385 | //queries++; |
| 386 | i = ((UInt)orig_addr) % VG_TT_SIZE; |
| 387 | while (True) { |
| 388 | //probes++; |
| 389 | if (vg_tt[i].orig_addr == orig_addr) |
| 390 | return &vg_tt[i]; |
| 391 | if (vg_tt[i].orig_addr == VG_TTE_EMPTY) |
| 392 | return NULL; |
| 393 | i++; |
| 394 | if (i == VG_TT_SIZE) i = 0; |
| 395 | } |
| 396 | } |
| 397 | |
| 398 | |
/* Find the translation address for a given (original) code address.
   If found, update VG_(tt_fast) so subsequent lookups are fast.  If
   no translation can be found, return zero.  This routine is (the
   only one) called from vg_run_innerloop. */
Addr VG_(search_transtab) ( Addr original_addr )
{
   TTEntry* tte;
   VGP_PUSHCC(VgpSlowFindT);
   tte = search_trans_table ( original_addr );
   if (tte == NULL) {
      /* We didn't find it.  vg_run_innerloop will have to request a
         translation. */
      VGP_POPCC;
      return (Addr)0;
   } else {
      /* Found it.  Put the search result into the fast cache now.
         Also set the mru_epoch to mark this translation as used, so
         the LRU pass will tend to keep it. */
      UInt cno = (UInt)original_addr & VG_TT_FAST_MASK;
      VG_(tt_fast)[cno] = (Addr)tte;
      /* Counts slow-path lookups; this point is only reached after a
         tt_fast miss. */
      VG_(tt_fast_misses)++;
      tte->mru_epoch = VG_(current_epoch);
      VGP_POPCC;
      return tte->trans_addr;
   }
}
| 424 | |
| 425 | |
| 426 | /*------------------------------------------------------------*/ |
| 427 | /*--- Detecting and handling self-modifying code. ---*/ |
| 428 | /*------------------------------------------------------------*/ |
| 429 | |
| 430 | /* This mechanism uses two data structures: |
| 431 | |
| 432 | vg_oldmap -- array[64k] of Bool, which approximately records |
| 433 | parts of the address space corresponding to code for which |
| 434 | a translation exists in the translation table. vg_oldmap is |
| 435 | consulted at each write, to determine whether that write might |
| 436 | be writing a code address; if so, the program is stopped at |
| 437 | the next jump, and the corresponding translations are invalidated. |
| 438 | |
| 439 | Precise semantics: vg_oldmap[(a >> 8) & 0xFFFF] is true for all |
| 440 | addresses a containing a code byte which has been translated. So |
| 441 | it acts kind-of like a direct-mapped cache with 64k entries. |
| 442 | |
| 443 | The second structure is vg_CAW, a small array of addresses at which |
| 444 | vg_oldmap indicates a code write may have happened. This is |
| 445 | (effectively) checked at each control transfer (jump), so that |
| 446 | translations can be discarded before going on. An array is |
| 447 | somewhat overkill, since it strikes me as very unlikely that a |
| 448 | single basic block will do more than one code write. Nevertheless |
| 449 | ... |
| 450 | |
| 451 | ToDo: make this comment up-to-date. |
| 452 | */ |
| 453 | |
| 454 | |
| 455 | /* Definitions for the self-modifying-code detection cache, intended |
| 456 | as a fast check which clears the vast majority of writes. */ |
| 457 | |
| 458 | #define VG_SMC_CACHE_HASH(aaa) \ |
| 459 | ((((UInt)a) >> VG_SMC_CACHE_SHIFT) & VG_SMC_CACHE_MASK) |
| 460 | |
| 461 | Bool VG_(smc_cache)[VG_SMC_CACHE_SIZE]; |
| 462 | |
| 463 | |
/* Definitions for the fallback mechanism, which, more slowly,
   provides a precise record of which words in the address space
   belong to original code. */

/* Second-level table: a bitmap with one bit per aligned 4-byte word
   of a 64k address chunk (2048 bytes * 8 bits = 16384 words). */
typedef struct { UChar chars[2048]; } VgSmcSecondary;

/* First-level table, indexed by the top 16 bits of an address.
   NULL means no translated code lies in that 64k chunk. */
static VgSmcSecondary* vg_smc_primary[65536];
| 471 | |
| 472 | static VgSmcSecondary* vg_smc_new_secondary ( void ) |
| 473 | { |
| 474 | Int i; |
| 475 | VgSmcSecondary* sec |
| 476 | = VG_(malloc) ( VG_AR_PRIVATE, sizeof(VgSmcSecondary) ); |
| 477 | for (i = 0; i < 2048; i++) |
| 478 | sec->chars[i] = 0; |
| 479 | return sec; |
| 480 | } |
| 481 | |
/* Read bit number `indx' of the bit array starting at `arr';
   evaluates to 0 or 1. */
#define GET_BIT_ARRAY(arr,indx)                 \
   (1 & (    ((UChar*)arr)[((UInt)indx) / 8]    \
          >> ( ((UInt)indx) % 8) ) )

/* Set bit number `indx' of the bit array starting at `arr'. */
#define SET_BIT_ARRAY(arr,indx)                 \
   ((UChar*)arr)[((UInt)indx) / 8] |= (1 << ((UInt)indx) % 8)


/* Finally, a place to record the original-code-write addresses
   detected in a basic block.  Drained by VG_(flush_transtab). */

#define VG_ORIGWRITES_SIZE 10

/* Pending code-write addresses, and how many are currently stored. */
static Addr vg_origwrites[VG_ORIGWRITES_SIZE];
static Int  vg_origwrites_used;
| 497 | |
| 498 | |
/* Call here to check a written address.  Cheap first-level filter
   via VG_(smc_cache); on a hit, consults the precise per-word
   bitmap, and if the write really lands on translated code, records
   the address in vg_origwrites for later flushing. */
void VG_(smc_check4) ( Addr a )
{
   UInt bit_index;
   VgSmcSecondary* smc_secondary;

   /* NOTE(review): this is #if, not #ifdef — presumably
      VG_SMC_FASTCHECK_IN_C is defined to 0 or 1 in vg_include.h;
      confirm. */
#  if VG_SMC_FASTCHECK_IN_C
   VG_(smc_total_check4s)++;

   /* Try the fast check first. */
   if (VG_(smc_cache)[VG_SMC_CACHE_HASH(a)] == False) return;
#  endif

   VG_(smc_cache_passed)++;

   /* Need to do a slow check. */
   smc_secondary = vg_smc_primary[a >> 16];
   if (smc_secondary == NULL) return;

   /* One bit per aligned 4-byte word within the 64k chunk. */
   bit_index = (a & 0xFFFF) >> 2;
   if (GET_BIT_ARRAY(smc_secondary->chars, bit_index) == 0) return;

   VG_(smc_fancy_passed)++;

   /* Detected a Real Live write to code which has been translated.
      Note it. */
   if (vg_origwrites_used == VG_ORIGWRITES_SIZE)
      VG_(panic)("VG_ORIGWRITES_SIZE is too small; "
                 "increase and recompile.");
   vg_origwrites[vg_origwrites_used] = a;
   vg_origwrites_used++;

   VG_(message)(Vg_DebugMsg, "self-modifying-code write at %p", a);

   /* Force an exit before the next basic block, so the translation
      cache can be flushed appropriately. */
   // VG_(dispatch_ctr_SAVED) = VG_(dispatch_ctr);
   //VG_(dispatch_ctr) = 1;
   //VG_(interrupt_reason) = VG_Y_SMC;
}
| 540 | |
| 541 | |
/* Mark an address range as containing an original translation,
   updating both the fast-check cache and the slow-but-correct data
   structure.  Called once per byte of the original code range.
   NOTE(review): check the definition of VG_SMC_CACHE_HASH before
   renaming the loop variable — as written there, its expansion may
   depend on the argument being named `a'. */
void VG_(smc_mark_original) ( Addr orig_addr, Int orig_size )
{
   Addr a;
   VgSmcSecondary* smc_secondary;
   UInt bit_index;

   for (a = orig_addr; a < orig_addr+orig_size; a++) {

      VG_(smc_cache)[VG_SMC_CACHE_HASH(a)] = True;

      /* Lazily create the secondary bitmap for this 64k chunk. */
      smc_secondary = vg_smc_primary[a >> 16];
      if (smc_secondary == NULL)
         smc_secondary =
         vg_smc_primary[a >> 16] = vg_smc_new_secondary();

      /* One bit per aligned 4-byte word within the chunk. */
      bit_index = (a & 0xFFFF) >> 2;
      SET_BIT_ARRAY(smc_secondary->chars, bit_index);
   }
}
| 565 | |
| 566 | |
/* Discard any translations whose original code overlaps with the
   range w_addr .. w_addr+3 inclusive.

   NOTE: the whole body is compiled out (#if 0) and refers to data
   structures (vg_transtab, vg_transtab_used, vg_smc_discards) not
   declared in this file — apparently from an older TT scheme.  Kept
   only as reference material for reinstating SMC-driven discards. */
__attribute__ ((unused))
static void discard_translations_bracketing ( Addr w_addr )
{
#  if 0
   Int i, rd, wr;
   Addr o_start, o_end;
   TTEntry* tt;

   for (i = 0; i < VG_TRANSTAB_SLOW_SIZE; i++) {
      tt = vg_transtab[i];
      wr = 0;
      for (rd = 0; rd < vg_transtab_used[i]; rd++) {
         o_start = tt[rd].orig_addr;
         o_end = o_start + tt[rd].orig_size;
         if (w_addr > o_end || (w_addr+3) < o_start) {
            /* No collision possible; keep this translation */
            VG_(smc_mark_original) ( tt[rd].orig_addr, tt[rd].orig_size );
            if (wr < rd) vg_transtab[wr] = vg_transtab[rd];
            wr++;
         } else {
            /* Possible collision; discard. */
            vg_smc_discards++;
            VG_(message) (Vg_DebugMsg,
                          "discarding translation of %p .. %p",
                          tt[rd].orig_addr,
                          tt[rd].orig_addr + tt[rd].orig_size - 1);
            VG_(free)((void*)tt[rd].trans_addr);
         }
      }
      vg_transtab_used[i] = wr;
   }
#  endif
}
| 603 | |
| 604 | |
/* Top-level function in charge of discarding out-of-date translations
   following the discovery of a (potential) original-code-write.

   NOTE: the whole body is compiled out (#if 0); it references
   structures (VG_(transtab_cache_*), VG_TRANSTAB_CACHE_SIZE) not
   declared in this file, so in this version the function is a no-op.
   Kept as reference for when SMC flushing is reinstated. */
void VG_(flush_transtab) ( void )
{
#  if 0
   Addr w_addr;
   Int i, j;

   /* We shouldn't be here unless a code write was detected. */
   vg_assert(vg_origwrites_used > 0);

   /* Instead of incrementally fixing up the translation table cache,
      just invalidate the whole darn thing.  Pray this doesn't happen
      very often :) */
   for (i = 0; i < VG_TRANSTAB_CACHE_SIZE; i++)
      VG_(transtab_cache_orig)[i] =
      VG_(transtab_cache_trans)[i] = (Addr)0;

   /* Clear out the fast cache; discard_translations_bracketing
      reconstructs it. */
   for (i = 0; i < VG_SMC_CACHE_SIZE; i++)
      VG_(smc_cache)[i] = False;

   /* And also clear the slow-but-correct table. */
   for (i = 0; i < 65536; i++) {
      VgSmcSecondary* sec = vg_smc_primary[i];
      if (sec)
         for (j = 0; j < 2048; j++)
            sec->chars[j] = 0;
   }

   /* This doesn't need to be particularly fast, since we (presumably)
      don't have to handle particularly frequent writes to code
      addresses. */
   while (vg_origwrites_used > 0) {
      vg_origwrites_used--;
      w_addr = vg_origwrites[vg_origwrites_used];
      discard_translations_bracketing ( w_addr );
   }

   vg_assert(vg_origwrites_used == 0);
#  endif
}
| 649 | |
| 650 | |
| 651 | /*------------------------------------------------------------*/ |
| 652 | /*--- Initialisation. ---*/ |
| 653 | /*------------------------------------------------------------*/ |
| 654 | |
| 655 | void VG_(init_transtab_and_SMC) ( void ) |
| 656 | { |
| 657 | Int i; |
| 658 | |
| 659 | /* Allocate the translation table and translation cache. */ |
| 660 | vg_assert(vg_tc == NULL); |
| 661 | vg_tc = VG_(get_memory_from_mmap) ( VG_TC_SIZE * sizeof(UChar) ); |
| 662 | vg_assert(vg_tc != NULL); |
| 663 | |
| 664 | vg_assert(vg_tt == NULL); |
| 665 | vg_tt = VG_(get_memory_from_mmap) ( VG_TT_SIZE * sizeof(TTEntry) ); |
| 666 | vg_assert(vg_tt != NULL); |
| 667 | |
| 668 | /* The main translation table is empty. */ |
| 669 | vg_tt_used = 0; |
| 670 | for (i = 0; i < VG_TT_SIZE; i++) { |
| 671 | vg_tt[i].orig_addr = VG_TTE_EMPTY; |
| 672 | } |
| 673 | |
| 674 | /* The translation table's fast cache is empty. Point all entries |
| 675 | at the first TT entry, which is, of course, empty. */ |
| 676 | for (i = 0; i < VG_TT_FAST_SIZE; i++) |
| 677 | VG_(tt_fast)[i] = (Addr)(&vg_tt[0]); |
| 678 | |
| 679 | /* No part of the address space has any translations. */ |
| 680 | for (i = 0; i < 65536; i++) |
| 681 | vg_smc_primary[i] = NULL; |
| 682 | |
| 683 | /* ... and the associated fast-check cache reflects this. */ |
| 684 | for (i = 0; i < VG_SMC_CACHE_SIZE; i++) |
| 685 | VG_(smc_cache)[i] = False; |
| 686 | |
| 687 | /* Finally, no original-code-writes have been recorded. */ |
| 688 | vg_origwrites_used = 0; |
| 689 | } |
| 690 | |
| 691 | /*--------------------------------------------------------------------*/ |
| 692 | /*--- end vg_transtab.c ---*/ |
| 693 | /*--------------------------------------------------------------------*/ |