sewardj | de4a1d0 | 2002-03-22 01:27:54 +0000 | [diff] [blame] | 1 | |
| 2 | ##--------------------------------------------------------------------## |
| 3 | ##--- Support routines for the JITter output. ---## |
| 4 | ##--- vg_helpers.S ---## |
| 5 | ##--------------------------------------------------------------------## |
| 6 | |
| 7 | /* |
| 8 | This file is part of Valgrind, an x86 protected-mode emulator |
| 9 | designed for debugging and profiling binaries on x86-Unixes. |
| 10 | |
| 11 | Copyright (C) 2000-2002 Julian Seward |
| 12 | jseward@acm.org |
| 13 | Julian_Seward@muraroa.demon.co.uk |
| 14 | |
| 15 | This program is free software; you can redistribute it and/or |
| 16 | modify it under the terms of the GNU General Public License as |
| 17 | published by the Free Software Foundation; either version 2 of the |
| 18 | License, or (at your option) any later version. |
| 19 | |
| 20 | This program is distributed in the hope that it will be useful, but |
| 21 | WITHOUT ANY WARRANTY; without even the implied warranty of |
| 22 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 23 | General Public License for more details. |
| 24 | |
| 25 | You should have received a copy of the GNU General Public License |
| 26 | along with this program; if not, write to the Free Software |
| 27 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA |
| 28 | 02111-1307, USA. |
| 29 | |
| 30 | The GNU General Public License is contained in the file LICENSE. |
| 31 | */ |
| 32 | |
| 33 | #include "vg_constants.h" |
| 34 | |
| 35 | |
| 36 | /* Various helper routines, for instructions which are just too |
| 37 | darn tedious for the JITter to output code in-line: |
| 38 | |
| 39 | * integer division |
| 40 | * integer multiplication |
| 41 | * setting and getting obscure eflags |
| 42 | * double-length shifts |
| 43 | |
| 44 | All routines use a standard calling convention designed for |
| 45 | calling from translations, in which the incoming args are |
| 46 | underneath the return address, the callee saves _all_ registers, |
| 47 | and the incoming parameters can be modified, to return results. |
| 48 | */ |
| 49 | |
| 50 | |
/* Handle a client request.
   Custom translation-call convention: args live beneath the return
   address, and the callee saves every register.
   On entry:
        %ECX value      -- 3rd arg to VG_(handle_client_request)
        %EBX value      -- 2nd arg
        %EAX value      -- 1st arg; its slot receives the result
        RA <- %esp      -- after pushal+pushfl the RA is at 36(%esp)
*/
.global VG_(helper_do_client_request)
VG_(helper_do_client_request):
	pushal				# save all integer regs (32 bytes)
	pushfl				# save %eflags; RA now at 36(%esp)

	# Re-push the three stacked args as cdecl arguments (last arg
	# first).  Each pushl lowers %esp by 4, so the *next* arg is
	# again found at 48(%esp).
	movl 48(%esp), %eax		# %ECX value (3rd C arg)
	pushl %eax
	movl 48(%esp), %eax		# %EBX value (2nd C arg)
	pushl %eax
	movl 48(%esp), %eax		# %EAX value (1st C arg)
	pushl %eax

	call VG_(handle_client_request)
	movl %eax, 52(%esp)		# result -> the %EAX arg slot (40(%esp) pre-push)

	addl $12, %esp			# discard the three C args

	popfl
	popal
	ret
| 78 | |
| 79 | |
/* Hand the pending system call over to the C side.
   No stacked args are read here; VG_(wrap_syscall) presumably
   exchanges register state through globals (not visible from this
   file -- confirm against vg_syscall_mem.c).  All real registers
   are preserved. */
.global VG_(helper_do_syscall)
VG_(helper_do_syscall):
	pushal
	call VG_(wrap_syscall)
	popal
#	movl $VG_(baseBlock), %ebp	# (disabled) reload baseBlock pointer
	ret
| 87 | |
| 88 | |
| 89 | |
/* Trampoline: report a definedness-check failure (0-size case) to the
   C handler.  No arguments; preserves all integer registers. */
.global VG_(helper_value_check0_fail)
VG_(helper_value_check0_fail):
	pushal
	call VG_(helperc_value_check0_fail)
	popal
	ret
| 96 | |
/* Trampoline: report a 1-byte definedness-check failure to the C
   handler.  No arguments; preserves all integer registers. */
.global VG_(helper_value_check1_fail)
VG_(helper_value_check1_fail):
	pushal
	call VG_(helperc_value_check1_fail)
	popal
	ret
| 103 | |
/* Trampoline: report a 2-byte definedness-check failure to the C
   handler.  No arguments; preserves all integer registers. */
.global VG_(helper_value_check2_fail)
VG_(helper_value_check2_fail):
	pushal
	call VG_(helperc_value_check2_fail)
	popal
	ret
| 110 | |
/* Trampoline: report a 4-byte definedness-check failure to the C
   handler.  No arguments; preserves all integer registers. */
.global VG_(helper_value_check4_fail)
VG_(helper_value_check4_fail):
	pushal
	call VG_(helperc_value_check4_fail)
	popal
	ret
| 117 | |
| 118 | |
/* Set things up so the dispatch loop exits normally.  Used when it is
   detected that the program wants to finish, ie it has called
   vg_shutdown.  The current dispatch counter is stashed (so it can be
   inspected/restored later), then forced to 1 so the dispatcher stops
   at its next counter check, and the exit reason is recorded. */
.global VG_(helper_request_normal_exit)
VG_(helper_request_normal_exit):
	pushl %eax
	movl VG_(dispatch_ctr), %eax
	movl %eax, VG_(dispatch_ctr_SAVED)	# remember the old counter
	movl $1, VG_(dispatch_ctr)		# force imminent loop exit
	movl $VG_Y_EXIT, VG_(interrupt_reason)	# why we are stopping
	popl %eax
	ret
| 132 | |
| 133 | |
/* Do an original-code-write (self-modifying-code) check for the
   address in %ebp.  All registers are preserved, per the helper
   calling convention declared at the top of this file.

   FIX: in the assembler fast path below, the cache-hit exit used to
   discard the saved %ebp with "addl $4, %esp", returning with %ebp
   still holding the cache index -- in violation of the save-all-
   registers convention (note the cache-miss path *does* restore it).
   It now restores %ebp with "popl %ebp", which costs nothing extra. */
.global VG_(helper_smc_check4)
VG_(helper_smc_check4):
#if VG_SMC_FASTCHECK_IN_C

	# Check is done in C: save the live regs, call out, restore.
	pushl %eax
	pushl %ebx
	pushl %ecx
	pushl %edx
	pushl %esi
	pushl %edi

	pushl %ebp			# arg: address to check
	call VG_(smc_check4)
	addl $4, %esp			# discard the arg

	popl %edi
	popl %esi
	popl %edx
	popl %ecx
	popl %ebx
	popl %eax

	ret
#else
	# Assembler fast path: probe a byte in the SMC cache, indexed
	# by (addr >> VG_SMC_CACHE_SHIFT) & VG_SMC_CACHE_MASK.
	incl VG_(smc_total_check4s)	# stats: count every fast check
	pushl %ebp			# save caller's %ebp across the probe
	shrl $VG_SMC_CACHE_SHIFT, %ebp
	andl $VG_SMC_CACHE_MASK, %ebp
	cmpb $0, VG_(smc_cache)(%ebp)
	jnz vg_smc_cache_failure
	popl %ebp			# cache hit: restore %ebp (was: addl $4, %esp)
	ret
vg_smc_cache_failure:
	# Possible hit on written-to code: do the full check in C.
	popl %ebp			# recover the original address
	pushal
	pushl %ebp			# arg: address to check
	call VG_(smc_check4)
	addl $4, %esp
	popal
	ret
#endif
| 177 | |
| 178 | |
/* Fetch the time-stamp counter.
   On entry:
        dummy, replaced by %EAX value (TSC low 32 bits)   -- 8(%esp)
        dummy, replaced by %EDX value (TSC high 32 bits)  -- 4(%esp)
        RA <- %esp
*/
.global VG_(helper_RDTSC)
VG_(helper_RDTSC):
	pushl %eax
	pushl %edx
	rdtsc				# %edx:%eax = timestamp counter
	movl %edx, 12(%esp)		# high word -> entry 4(%esp)
	movl %eax, 16(%esp)		# low word  -> entry 8(%esp)
	popl %edx
	popl %eax
	ret
| 195 | |
| 196 | |
/* Do the CPUID instruction.
   On entry (deepest slot first):
        dummy, replaced by %EAX value   -- 16(%esp): requested leaf in,
                                           leaf result out
        dummy, replaced by %EBX value   -- 12(%esp)
        dummy, replaced by %ECX value   --  8(%esp)
        dummy, replaced by %EDX value   --  4(%esp)
        RA <- %esp

   We do not execute a real CPUID, since emulating one faithfully
   (different results per %EAX leaf, per CPU) is hard.  Instead we
   pretend to be Werner's P54C P133 -- an original pre-MMX Pentium --
   so feature-probing code never selects MMX / 3DNow! instructions
   the JITter does not understand.  (Dirk <dirk@kde.org>)

   http://www.sandpile.org/ia32/cpuid.htm

   <werner> cpuid words (0): 0x1 0x756e6547 0x6c65746e 0x49656e69
   <werner> cpuid words (1): 0x52b 0x0 0x0 0x1bf

   Cleanup vs. the previous revision: the internal jump targets are
   now assembler-local (.L-prefixed) instead of global symbols, and
   the commented-out experiments (raw cpuid / all-zero xors) have
   been removed. */
.global VG_(helper_CPUID)
VG_(helper_CPUID):
	pushl %eax
	pushl %ebx
	pushl %ecx
	pushl %edx
	movl 32(%esp), %eax		# requested leaf (caller's %EAX)
	cmpl $0, %eax
	jz .Lcpuid_leaf0
	# Leaf 1 (and any other leaf): family/model/stepping in %eax,
	# feature flags in %edx; FPU+VME+PSE+TSC+MSR+CX8 etc., no MMX.
	movl $0x52b, %eax
	movl $0x0, %ebx
	movl $0x0, %ecx
	movl $0x1bf, %edx
	jmp .Lcpuid_done
.Lcpuid_leaf0:
	# Leaf 0: max supported leaf = 1, vendor id "GenuineIntel"
	movl $0x1, %eax
	movl $0x756e6547, %ebx		# "Genu"
	movl $0x6c65746e, %ecx		# "ntel"
	movl $0x49656e69, %edx		# "ineI"
.Lcpuid_done:
	# Write the four results back into the stacked arg slots.
	movl %edx, 20(%esp)
	movl %ecx, 24(%esp)
	movl %ebx, 28(%esp)
	movl %eax, 32(%esp)
	popl %edx
	popl %ecx
	popl %ebx
	popl %eax
	ret
| 258 | |
| 259 | |
/* Fetch the FPU status register (fstsw %ax).
   On entry:
        dummy, replaced by result (status word in low 16 bits)
        RA <- %esp
   The simulated FPU state is first loaded with frstor so that fstsw
   reads the client's status word, not ours.
   NOTE(review): this assumes %ebp holds the baseBlock pointer and
   VGOFF_(m_fpustate) supplies the word-offset of the FPU image
   within it (hence the *4 scaling) -- confirm against
   vg_constants.h and the dispatcher. */
.global VG_(helper_fstsw_AX)
VG_(helper_fstsw_AX):
	pushl %eax
	pushl %esi
	movl VGOFF_(m_fpustate), %esi
	frstor (%ebp, %esi, 4)		# load client's FPU state
	fstsw %ax			# %ax = client FPU status word
	popl %esi
	movw %ax, 8(%esp)		# store result into the arg slot
	popl %eax
	ret
| 276 | |
| 277 | |
/* Copy %ah into %eflags (SAHF loads SF,ZF,AF,PF,CF from %ah).
   On entry:
        value of %eax (only %ah is meaningful)
        RA <- %esp
   The updated condition codes are left live in %eflags on return
   (popl/ret do not disturb them). */
.global VG_(helper_SAHF)
VG_(helper_SAHF):
	pushl %eax
	movl 8(%esp), %eax		# fetch the caller's %eax value
	sahf				# flags <- %ah
	popl %eax
	ret
| 290 | |
| 291 | |
/* Do %al = DAS(%al).  Note that the passed param has %AL as the least
   significant 8 bits, since it was generated with GETB %AL,
   some-temp.  Fortunately %al is the least significant 8 bits of
   %eax anyway, which is why it's safe to work with %eax as a
   whole.

   On entry:
        value of %eax (only %al meaningful), replaced by result
        RA <- %esp
*/
.global VG_(helper_DAS)
VG_(helper_DAS):
	pushl %eax
	movl 8(%esp), %eax		# %al = input byte
	das				# decimal-adjust %al after subtraction
	movl %eax, 8(%esp)		# write adjusted value back
	popl %eax
	ret
| 310 | |
| 311 | |
| 312 | |
/* Bit scan reverse: index of the highest set bit.  Sets flags (??)
   -- per the BSR definition ZF reflects a zero source; the result is
   undefined for a zero input, in which case the slot receives
   whatever the CPU left in %eax.
   On entry:
        value, replaced by result
        RA <- %esp
*/
.global VG_(helper_bsr)
VG_(helper_bsr):
	pushl %eax
	bsrl 8(%esp), %eax		# %eax = index of highest set bit
	movl %eax, 8(%esp)		# write result into the arg slot
	popl %eax			# popl/ret leave the flags intact
	ret
| 325 | |
/* Bit scan forward: index of the lowest set bit.  Same contract and
   zero-input caveat as helper_bsr above. */
.global VG_(helper_bsf)
VG_(helper_bsf):
	pushl %eax
	bsfl 8(%esp), %eax		# %eax = index of lowest set bit
	movl %eax, 8(%esp)		# write result into the arg slot
	popl %eax			# popl/ret leave the flags intact
	ret
| 333 | |
| 334 | |
/* Bit test and set/reset/complement.  Sets flags (CF = tested bit).
   On entry:
        src (bit index)              -- 8(%esp)
        dst (word holding the bits)  -- 4(%esp)
        RA <- %esp

   NOTE all these are basically misimplemented, since for memory
   operands it appears the index value can be arbitrary, and the
   address should be calculated accordingly.  Here, we assume (by
   forcing the register- and memory- versions to be handled by
   the same helper) that the offset is always in the range
   0 .. word-size-1, or to be more precise by implementing the
   client's memory- version of this using the register- version,
   we impose the condition that the offset is used
   modulo-wordsize.  This is just plain wrong and should be
   fixed.
*/
.global VG_(helper_bt)
VG_(helper_bt):
	pushl %eax
	movl 12(%esp), %eax		# bit index
	btl %eax, 8(%esp)		# CF = bit (index mod 32) of dst
	popl %eax			# popl/ret leave CF intact
	ret
/* As helper_bt above, but also sets the tested bit (BTS).
   Same args, same modulo-wordsize caveat. */
.global VG_(helper_bts)
VG_(helper_bts):
	pushl %eax
	movl 12(%esp), %eax		# bit index
	btsl %eax, 8(%esp)		# CF = old bit; bit is then set
	popl %eax			# popl/ret leave CF intact
	ret
/* As helper_bt above, but also clears the tested bit (BTR).
   Same args, same modulo-wordsize caveat. */
.global VG_(helper_btr)
VG_(helper_btr):
	pushl %eax
	movl 12(%esp), %eax		# bit index
	btrl %eax, 8(%esp)		# CF = old bit; bit is then cleared
	popl %eax			# popl/ret leave CF intact
	ret
/* As helper_bt above, but also complements the tested bit (BTC).
   Same args, same modulo-wordsize caveat. */
.global VG_(helper_btc)
VG_(helper_btc):
	pushl %eax
	movl 12(%esp), %eax		# bit index
	btcl %eax, 8(%esp)		# CF = old bit; bit is then flipped
	popl %eax			# popl/ret leave CF intact
	ret
| 380 | |
| 381 | |
/* 32-bit double-length shift left.
   On entry:
        amount  -- 12(%esp), shift count
        src     --  8(%esp), supplies the bits shifted in
        dst     --  4(%esp), replaced by the result
        RA <- %esp
   The flags produced by shldl are left live on return. */
.global VG_(helper_shldl)
VG_(helper_shldl):
	pushl %eax
	pushl %ebx
	pushl %ecx

	movb 24(%esp), %cl		# shift amount
	movl 20(%esp), %ebx		# src
	movl 16(%esp), %eax		# dst
	shldl %cl, %ebx, %eax
	movl %eax, 16(%esp)		# result -> dst slot

	popl %ecx
	popl %ebx
	popl %eax
	ret
| 405 | |
/* 16-bit double-length shift left.  Same stack layout as
   helper_shldl, but only the low 16 bits of the src/dst slots are
   read and written. */
.global VG_(helper_shldw)
VG_(helper_shldw):
	pushl %eax
	pushl %ebx
	pushl %ecx

	movb 24(%esp), %cl		# shift amount
	movw 20(%esp), %bx		# src (low word)
	movw 16(%esp), %ax		# dst (low word)
	shldw %cl, %bx, %ax
	movw %ax, 16(%esp)		# result -> dst slot (low word)

	popl %ecx
	popl %ebx
	popl %eax
	ret
| 422 | |
/* 32-bit double-length shift right.  Same stack layout as
   helper_shldl; flags from shrdl are left live on return. */
.global VG_(helper_shrdl)
VG_(helper_shrdl):
	pushl %eax
	pushl %ebx
	pushl %ecx

	movb 24(%esp), %cl		# shift amount
	movl 20(%esp), %ebx		# src
	movl 16(%esp), %eax		# dst
	shrdl %cl, %ebx, %eax
	movl %eax, 16(%esp)		# result -> dst slot

	popl %ecx
	popl %ebx
	popl %eax
	ret
| 439 | |
/* 16-bit double-length shift right.  Same stack layout as
   helper_shldl, but only the low 16 bits of the src/dst slots are
   read and written. */
.global VG_(helper_shrdw)
VG_(helper_shrdw):
	pushl %eax
	pushl %ebx
	pushl %ecx

	movb 24(%esp), %cl		# shift amount
	movw 20(%esp), %bx		# src (low word)
	movw 16(%esp), %ax		# dst (low word)
	shrdw %cl, %bx, %ax
	movw %ax, 16(%esp)		# result -> dst slot (low word)

	popl %ecx
	popl %ebx
	popl %eax
	ret
| 456 | |
| 457 | |
/* Get the direction flag, and return either 1 (DF clear: string ops
   ascend) or -1 (DF set: string ops descend) in the single stacked
   argument slot.  Caller's %eflags and %eax are preserved.
   On entry:
        dummy, replaced by the result
        RA <- %esp
   FIX: the internal jump targets were the global symbols "L1"/"L2" --
   hugely collision-prone names polluting the link namespace.  They
   are now assembler-local (.L-prefixed) labels; no code change. */
.global VG_(helper_get_dirflag)
VG_(helper_get_dirflag):
	pushfl				# preserve caller's %eflags
	pushl %eax

	pushfl
	popl %eax			# %eax = %eflags
	shrl $10, %eax			# DF is bit 10 of %eflags
	andl $1, %eax
	jnz .Lvg_dirflag_down
	movl $1, %eax			# DF clear -> +1
	jmp .Lvg_dirflag_store
.Lvg_dirflag_down:
	movl $-1, %eax			# DF set -> -1
.Lvg_dirflag_store:
	movl %eax, 12(%esp)		# result -> the arg slot

	popl %eax
	popfl				# restore caller's %eflags
	ret
| 477 | |
| 478 | |
/* Clear the direction flag (string ops will ascend). */
.global VG_(helper_CLD)
VG_(helper_CLD):
	cld
	ret
| 484 | |
/* Set the direction flag (string ops will descend). */
.global VG_(helper_STD)
VG_(helper_STD):
	std
	ret
| 489 | |
| 490 | |
| 491 | |
/* Signed 32-to-64 multiply.
   On entry:
        multiplicand -- 8(%esp), replaced by product low 32 bits
        multiplier   -- 4(%esp), replaced by product high 32 bits
        RA <- %esp
   Flags from imull (CF/OF significant) are left live on return. */
.globl VG_(helper_imul_32_64)
VG_(helper_imul_32_64):
	pushl %eax
	pushl %edx
	movl 16(%esp), %eax
	imull 12(%esp)			# %edx:%eax = %eax * operand
	movl %eax, 16(%esp)		# product low half
	movl %edx, 12(%esp)		# product high half
	popl %edx
	popl %eax
	ret
| 504 | |
/* Signed 16-to-32 multiply.
   On entry (only low 16 bits of each slot are used):
        multiplicand -- 8(%esp), low word replaced by product lo (%ax)
        multiplier   -- 4(%esp), low word replaced by product hi (%dx)
        RA <- %esp
   Flags from imulw are left live on return. */
.globl VG_(helper_imul_16_32)
VG_(helper_imul_16_32):
	pushl %eax
	pushl %edx
	movw 16(%esp), %ax
	imulw 12(%esp)			# %dx:%ax = %ax * operand
	movw %ax, 16(%esp)		# product low word
	movw %dx, 12(%esp)		# product high word
	popl %edx
	popl %eax
	ret
| 517 | |
/* Signed 8-to-16 multiply.
   On entry:
        multiplicand -- 8(%esp), low byte in; low word replaced by
                        the 16-bit product (%ax)
        multiplier   -- 4(%esp), low byte used as the other factor
        RA <- %esp
   Flags from imulb are left live on return. */
.globl VG_(helper_imul_8_16)
VG_(helper_imul_8_16):
	pushl %eax
	pushl %edx
	movb 16(%esp), %al
	imulb 12(%esp)			# %ax = %al * operand
	movw %ax, 16(%esp)		# 16-bit product
	popl %edx
	popl %eax
	ret
| 529 | |
| 530 | |
| 531 | |
| 532 | |
| 533 | |
| 534 | |
/* Unsigned 32-to-64 multiply.
   On entry:
        multiplicand -- 8(%esp), replaced by product low 32 bits
        multiplier   -- 4(%esp), replaced by product high 32 bits
        RA <- %esp
   Flags from mull (CF/OF significant) are left live on return. */
.globl VG_(helper_mul_32_64)
VG_(helper_mul_32_64):
	pushl %eax
	pushl %edx
	movl 16(%esp), %eax
	mull 12(%esp)			# %edx:%eax = %eax * operand
	movl %eax, 16(%esp)		# product low half
	movl %edx, 12(%esp)		# product high half
	popl %edx
	popl %eax
	ret
| 547 | |
/* Unsigned 16-to-32 multiply.
   On entry (only low 16 bits of each slot are used):
        multiplicand -- 8(%esp), low word replaced by product lo (%ax)
        multiplier   -- 4(%esp), low word replaced by product hi (%dx)
        RA <- %esp
   Flags from mulw are left live on return. */
.globl VG_(helper_mul_16_32)
VG_(helper_mul_16_32):
	pushl %eax
	pushl %edx
	movw 16(%esp), %ax
	mulw 12(%esp)			# %dx:%ax = %ax * operand
	movw %ax, 16(%esp)		# product low word
	movw %dx, 12(%esp)		# product high word
	popl %edx
	popl %eax
	ret
| 560 | |
/* Unsigned 8-to-16 multiply.
   On entry:
        multiplicand -- 8(%esp), low byte in; low word replaced by
                        the 16-bit product (%ax)
        multiplier   -- 4(%esp), low byte used as the other factor
        RA <- %esp
   Flags from mulb are left live on return. */
.globl VG_(helper_mul_8_16)
VG_(helper_mul_8_16):
	pushl %eax
	pushl %edx
	movb 16(%esp), %al
	mulb 12(%esp)			# %ax = %al * operand
	movw %ax, 16(%esp)		# 16-bit product
	popl %edx
	popl %eax
	ret
| 572 | |
| 573 | |
| 574 | |
| 575 | |
/* Unsigned 64-into-32 divide.
   On entry:
        divisor      -- 12(%esp)
        dividend lo  --  8(%esp), replaced by the quotient
        dividend hi  --  4(%esp), replaced by the remainder
        RA <- %esp
   Faults (#DE) on divide-by-zero or quotient overflow, exactly as
   the real instruction would. */
.globl VG_(helper_div_64_32)
VG_(helper_div_64_32):
	pushl %eax
	pushl %edx
	movl 16(%esp),%eax		# dividend low half
	movl 12(%esp),%edx		# dividend high half
	divl 20(%esp)			# %eax = quot, %edx = rem
	movl %eax,16(%esp)
	movl %edx,12(%esp)
	popl %edx
	popl %eax
	ret
| 589 | |
/* Signed 64-into-32 divide.  Same stack layout and #DE behaviour as
   helper_div_64_32 above, but signed (idivl). */
.globl VG_(helper_idiv_64_32)
VG_(helper_idiv_64_32):
	pushl %eax
	pushl %edx
	movl 16(%esp),%eax		# dividend low half
	movl 12(%esp),%edx		# dividend high half
	idivl 20(%esp)			# %eax = quot, %edx = rem
	movl %eax,16(%esp)
	movl %edx,12(%esp)
	popl %edx
	popl %eax
	ret
| 603 | |
/* Unsigned 32-into-16 divide.
   On entry (only low 16 bits of each slot are used):
        divisor      -- 12(%esp)
        dividend lo  --  8(%esp), low word replaced by quotient (%ax)
        dividend hi  --  4(%esp), low word replaced by remainder (%dx)
        RA <- %esp
   Faults (#DE) on divide-by-zero or quotient overflow. */
.globl VG_(helper_div_32_16)
VG_(helper_div_32_16):
	pushl %eax
	pushl %edx
	movw 16(%esp),%ax		# dividend low word
	movw 12(%esp),%dx		# dividend high word
	divw 20(%esp)			# %ax = quot, %dx = rem
	movw %ax,16(%esp)
	movw %dx,12(%esp)
	popl %edx
	popl %eax
	ret
| 617 | |
/* Signed 32-into-16 divide.  Same stack layout and #DE behaviour as
   helper_div_32_16 above, but signed (idivw). */
.globl VG_(helper_idiv_32_16)
VG_(helper_idiv_32_16):
	pushl %eax
	pushl %edx
	movw 16(%esp),%ax		# dividend low word
	movw 12(%esp),%dx		# dividend high word
	idivw 20(%esp)			# %ax = quot, %dx = rem
	movw %ax,16(%esp)
	movw %dx,12(%esp)
	popl %edx
	popl %eax
	ret
| 631 | |
/* Unsigned 16-into-8 divide.
   On entry:
        divisor (byte)    -- 12(%esp)
        dividend (word)   --  8(%esp); its low byte then receives the
                             remainder (%ah)
        result slot       --  4(%esp); its low byte receives the
                             quotient (%al)
        RA <- %esp
   Faults (#DE) on divide-by-zero or quotient overflow. */
.globl VG_(helper_div_16_8)
VG_(helper_div_16_8):
	pushl %eax
	movw 12(%esp),%ax		# 16-bit dividend
	divb 16(%esp)			# %al = quot, %ah = rem
	movb %ah,12(%esp)		# remainder
	movb %al,8(%esp)		# quotient
	popl %eax
	ret
| 642 | |
/* Signed 16-into-8 divide.  Same stack layout and #DE behaviour as
   helper_div_16_8 above, but signed (idivb). */
.globl VG_(helper_idiv_16_8)
VG_(helper_idiv_16_8):
	pushl %eax
	movw 12(%esp),%ax		# 16-bit dividend
	idivb 16(%esp)			# %al = quot, %ah = rem
	movb %ah,12(%esp)		# remainder
	movb %al,8(%esp)		# quotient
	popl %eax
	ret
| 653 | |
| 654 | |
| 655 | ##--------------------------------------------------------------------## |
| 656 | ##--- end vg_helpers.S ---## |
| 657 | ##--------------------------------------------------------------------## |