/*---------------------------------------------------------------*/
/*--- begin                                guest_amd64_defs.h ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2013 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

/* Only to be used within the guest-amd64 directory. */
/* Include guard: this header may be pulled in by several files in the
   guest-amd64 directory. */
#ifndef __VEX_GUEST_AMD64_DEFS_H
#define __VEX_GUEST_AMD64_DEFS_H

#include "libvex_basictypes.h"
#include "libvex_emnote.h"             // VexEmNote
#include "libvex_guest_amd64.h"        // VexGuestAMD64State
#include "guest_generic_bb_to_IR.h"    // DisResult
sewardjd20c8852005-01-20 20:04:07 +000045
/*---------------------------------------------------------*/
/*--- amd64 to IR conversion                            ---*/
/*---------------------------------------------------------*/

/* Convert one amd64 insn to IR.  See the type DisOneInstrFn in
   bb_to_IR.h.
   Notes on the arguments (NOTE(review): inferred from the names and
   from DisOneInstrFn -- confirm against guest_generic_bb_to_IR.h):
   guest_code + delta presumably locates the instruction bytes, and
   guest_IP is its guest address.  sigill_diag controls whether an
   unrecognised instruction produces a diagnostic. */
extern
DisResult disInstr_AMD64 ( IRSB*        irbb,
                           Bool         (*resteerOkFn) ( void*, Addr64 ),
                           Bool         resteerCisOk,
                           void*        callback_opaque,
                           UChar*       guest_code,
                           Long         delta,
                           Addr64       guest_IP,
                           VexArch      guest_arch,
                           VexArchInfo* archinfo,
                           VexAbiInfo*  abiinfo,
                           VexEndness   host_endness,
                           Bool         sigill_diag );

/* Used by the optimiser to specialise calls to helpers.  Given the
   name of a helper and the IR arguments of a call to it (plus the
   statements preceding the call), may return a cheaper equivalent
   IR expression, or NULL-equivalent if no specialisation applies
   (NOTE(review): exact no-match return value is defined in the .c
   file -- confirm there). */
extern
IRExpr* guest_amd64_spechelper ( const HChar* function_name,
                                 IRExpr**     args,
                                 IRStmt**     precedingStmts,
                                 Int          n_precedingStmts );

/* Describes to the optimiser which part of the guest state require
   precise memory exceptions.  This is logically part of the guest
   state description.  The two Ints are a [min, max] byte-offset
   range into VexGuestAMD64State (NOTE(review): inferred from the
   standard VEX guest interface -- confirm). */
extern
Bool guest_amd64_state_requires_precise_mem_exns ( Int, Int );

/* Layout of the AMD64 guest state, for use by Memcheck and the
   register allocator.  Defined in the corresponding .c file. */
extern
VexGuestLayout amd64guest_layout;
81
82
/*---------------------------------------------------------*/
/*--- amd64 guest helpers                               ---*/
/*---------------------------------------------------------*/

/* --- CLEAN HELPERS --- */

/* Evaluate all of the OSZACP %rflags bits from a flag thunk
   (cc_op/cc_dep1/cc_dep2/cc_ndep -- see the thunk description later
   in this file). */
extern ULong amd64g_calculate_rflags_all ( 
                ULong cc_op, 
                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep 
             );

/* Evaluate just the carry flag from a flag thunk. */
extern ULong amd64g_calculate_rflags_c (
                ULong cc_op,
                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep 
             );

/* Evaluate the given AMD64Condcode (see end of file) against a flag
   thunk; used to resolve conditional jumps/moves/sets. */
extern ULong amd64g_calculate_condition ( 
                ULong/*AMD64Condcode*/ cond, 
                ULong cc_op, 
                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep 
             );

/* x87 FXAM: classify the value 'dbl' given its tag word bits. */
extern ULong amd64g_calculate_FXAM ( ULong tag, ULong dbl );

/* Rotate-through-carry right/left.  Take the value, rotate amount,
   incoming %rflags and operand size; see the rcl/rcr note at the end
   of the thunk description below for the packed result. */
extern ULong amd64g_calculate_RCR  ( 
                ULong arg, ULong rot_amt, ULong rflags_in, Long sz 
             );

extern ULong amd64g_calculate_RCL  ( 
                ULong arg, ULong rot_amt, ULong rflags_in, Long sz 
             );

/* PCLMULQDQ-style carryless multiply of s1 and s2; 'which'
   presumably selects which 64-bit half of the product is returned --
   confirm in guest_amd64_helpers.c. */
extern ULong amd64g_calculate_pclmul(ULong s1, ULong s2, ULong which);

/* Validate/convert an x87 control word loaded by FLDCW. */
extern ULong amd64g_check_fldcw ( ULong fpucw );

/* Build an x87 control word from a VEX rounding-mode value. */
extern ULong amd64g_create_fpucw ( ULong fpround );

/* Validate/convert an MXCSR value loaded by LDMXCSR. */
extern ULong amd64g_check_ldmxcsr ( ULong mxcsr );

/* Build an MXCSR value from a VEX SSE rounding-mode value. */
extern ULong amd64g_create_mxcsr ( ULong sseround );

/* x87/FP environment loads.  HWord is the guest address to read
   from; the returned VexEmNote reports emulation warnings.
   NOTE(review): named "dirtyhelper" but listed in the CLEAN HELPERS
   section of the original file -- placement kept as-is. */
extern VexEmNote amd64g_dirtyhelper_FLDENV  ( VexGuestAMD64State*, HWord );
extern VexEmNote amd64g_dirtyhelper_FRSTOR  ( VexGuestAMD64State*, HWord );
extern VexEmNote amd64g_dirtyhelper_FRSTORS ( VexGuestAMD64State*, HWord );

/* x87/FP environment stores; HWord is the guest address written. */
extern void amd64g_dirtyhelper_FSTENV  ( VexGuestAMD64State*, HWord );
extern void amd64g_dirtyhelper_FNSAVE  ( VexGuestAMD64State*, HWord );
extern void amd64g_dirtyhelper_FNSAVES ( VexGuestAMD64State*, HWord );

/* Translate a guest virtual_addr into a guest linear address by
   consulting the supplied LDT/GDT structures.  Their representation
   must be as specified in pub/libvex_guest_amd64.h.  To indicate a
   translation failure, 1<<32 is returned.  On success, the lower 32
   bits of the returned result indicate the linear address.  
*/
//extern 
//ULong amd64g_use_seg_selector ( HWord ldt, HWord gdt, 
//                                UInt seg_selector, UInt virtual_addr );

/* MMX PMADDWD / PSADBW on two 64-bit operands. */
extern ULong amd64g_calculate_mmx_pmaddwd  ( ULong, ULong );
extern ULong amd64g_calculate_mmx_psadbw   ( ULong, ULong );

/* SSE4.1 PHMINPOSUW; the 128-bit source is passed as two 64-bit
   halves (sLo, sHi). */
extern ULong amd64g_calculate_sse_phminposuw ( ULong sLo, ULong sHi );

/* SSE4.2 CRC32 accumulation, one helper per operand width
   (byte/word/long/quad). */
extern ULong amd64g_calc_crc32b ( ULong crcIn, ULong b );
extern ULong amd64g_calc_crc32w ( ULong crcIn, ULong w );
extern ULong amd64g_calc_crc32l ( ULong crcIn, ULong l );
extern ULong amd64g_calc_crc32q ( ULong crcIn, ULong q );

/* SSE4.1 MPSADBW; both 128-bit operands are passed as hi/lo 64-bit
   halves, plus the immediate control byte. */
extern ULong amd64g_calc_mpsadbw ( ULong sHi, ULong sLo,
                                   ULong dHi, ULong dLo,
                                   ULong imm_and_return_control_bit );

/* BMI2 PEXT / PDEP bit extract/deposit. */
extern ULong amd64g_calculate_pext  ( ULong, ULong );
extern ULong amd64g_calculate_pdep  ( ULong, ULong );
/* --- DIRTY HELPERS --- */

/* Load an x87 80-bit extended value from the guest address; the
   ULong result presumably holds the value converted to a 64-bit
   double -- confirm in guest_amd64_helpers.c. */
extern ULong amd64g_dirtyhelper_loadF80le  ( ULong/*addr*/ );

/* Store 'data' (64-bit) to the guest address in x87 80-bit extended
   format. */
extern void amd64g_dirtyhelper_storeF80le ( ULong/*addr*/, ULong/*data*/ );

/* CPUID emulations at various feature levels; each writes the
   result registers back into the guest state. */
extern void amd64g_dirtyhelper_CPUID_baseline ( VexGuestAMD64State* st );
extern void amd64g_dirtyhelper_CPUID_sse3_and_cx16 ( VexGuestAMD64State* st );
extern void amd64g_dirtyhelper_CPUID_sse42_and_cx16 ( VexGuestAMD64State* st );
extern void amd64g_dirtyhelper_CPUID_avx_and_cx16 ( VexGuestAMD64State* st );

/* FINIT: reset the guest's x87 state. */
extern void amd64g_dirtyhelper_FINIT ( VexGuestAMD64State* );

/* FXSAVE/FXRSTOR: save/restore FP+SSE state at the given guest
   address; FXRSTOR can raise emulation warnings (VexEmNote). */
extern void      amd64g_dirtyhelper_FXSAVE  ( VexGuestAMD64State*, HWord );
extern VexEmNote amd64g_dirtyhelper_FXRSTOR ( VexGuestAMD64State*, HWord );

/* RDTSC returns the timestamp counter; RDTSCP also updates guest
   registers in-place, hence the state pointer. */
extern ULong amd64g_dirtyhelper_RDTSC ( void );
extern void  amd64g_dirtyhelper_RDTSCP ( VexGuestAMD64State* st );

/* Port I/O (IN/OUT) at the given width. */
extern ULong amd64g_dirtyhelper_IN  ( ULong portno, ULong sz/*1,2 or 4*/ );
extern void  amd64g_dirtyhelper_OUT ( ULong portno, ULong data, 
                                      ULong sz/*1,2 or 4*/ );

/* SGDT/SIDT style descriptor-table stores; 'op' selects which
   (0 or 1). */
extern void amd64g_dirtyhelper_SxDT ( void* address,
                                      ULong op /* 0 or 1 */ );

/* Helps with PCMP{I,E}STR{I,M}.

   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (But not really,
   actually it could be a clean helper, but for the fact that we can't
   pass by value 2 x V128 to a clean helper, nor have one returned.)
   Reads guest state, writes to guest state for the xSTRM cases, no
   accesses of memory, is a pure function.

   opc_and_imm contains (4th byte of opcode << 8) | the-imm8-byte so
   the callee knows which I/E and I/M variant it is dealing with and
   what the specific operation is.  4th byte of opcode is in the range
   0x60 to 0x63:
       istri  66 0F 3A 63
       istrm  66 0F 3A 62
       estri  66 0F 3A 61
       estrm  66 0F 3A 60

   gstOffL and gstOffR are the guest state offsets for the two XMM
   register inputs.  We never have to deal with the memory case since
   that is handled by pre-loading the relevant value into the fake
   XMM16 register.

   For ESTRx variants, edxIN and eaxIN hold the values of those two
   registers.

   In all cases, the bottom 16 bits of the result contain the new
   OSZACP %rflags values.  For xSTRI variants, bits[31:16] of the
   result hold the new %ecx value.  For xSTRM variants, the helper
   writes the result directly to the guest XMM0.

   Declarable side effects: in all cases, reads guest state at
   [gstOffL, +16) and [gstOffR, +16).  For xSTRM variants, also writes
   guest_XMM0.

   Is expected to be called with opc_and_imm combinations which have
   actually been validated, and will assert if otherwise.  The front
   end should ensure we're only called with verified values.
*/
extern ULong amd64g_dirtyhelper_PCMPxSTRx ( 
          VexGuestAMD64State*,
          HWord opc4_and_imm,
          HWord gstOffL, HWord gstOffR,
          HWord edxIN, HWord eaxIN
       );

/* Implementation of intel AES instructions as described in
   Intel Advanced Vector Extensions
     Programming Reference
     MARCH 2008
     319433-002.

   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (But not really,
   actually it could be a clean helper, but for the fact that we can't
   pass by value 2 x V128 to a clean helper, nor have one returned.)
   Reads guest state, writes to guest state, no
   accesses of memory, is a pure function.

   opc4 contains the 4th byte of opcode.  Front-end should only
   give opcode corresponding to AESENC/AESENCLAST/AESDEC/AESDECLAST/AESIMC.
   (will assert otherwise).

   gstOffL and gstOffR are the guest state offsets for the two XMM
   register inputs, gstOffD is the guest state offset for the XMM register
   output.  We never have to deal with the memory case since that is handled
   by pre-loading the relevant value into the fake XMM16 register.

*/
extern void amd64g_dirtyhelper_AES ( 
          VexGuestAMD64State* gst,
          HWord opc4, HWord gstOffD,
          HWord gstOffL, HWord gstOffR
       );

/* Implementation of AESKEYGENASSIST. 

   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (But not really,
   actually it could be a clean helper, but for the fact that we can't
   pass by value 1 x V128 to a clean helper, nor have one returned.)
   Reads guest state, writes to guest state, no
   accesses of memory, is a pure function.

   imm8 is the Round Key constant.

   gstOffL and gstOffR are the guest state offsets for the two XMM
   register input and output.  We never have to deal with the memory case since
   that is handled by pre-loading the relevant value into the fake
   XMM16 register.

*/
extern void amd64g_dirtyhelper_AESKEYGENASSIST ( 
          VexGuestAMD64State* gst,
          HWord imm8,
          HWord gstOffL, HWord gstOffR
       );

/* Historical declarations, retained for reference. */
//extern void amd64g_dirtyhelper_CPUID_sse0 ( VexGuestAMD64State* );
//extern void amd64g_dirtyhelper_CPUID_sse1 ( VexGuestAMD64State* );
//extern void amd64g_dirtyhelper_CPUID_sse2 ( VexGuestAMD64State* );

//extern void  amd64g_dirtyhelper_FSAVE ( VexGuestAMD64State*, HWord );

//extern VexEmNote
//            amd64g_dirtyhelper_FRSTOR ( VexGuestAMD64State*, HWord );

//extern void amd64g_dirtyhelper_FSTENV ( VexGuestAMD64State*, HWord );

//extern VexEmNote 
//            amd64g_dirtyhelper_FLDENV ( VexGuestAMD64State*, HWord );
sewardjd20c8852005-01-20 20:04:07 +0000295
296
/*---------------------------------------------------------*/
/*--- Condition code stuff                              ---*/
/*---------------------------------------------------------*/

/* rflags masks.  For each of the six OSZACP flags, the bit position
   within %rflags and the corresponding single-bit mask.  The shift
   and mask for each flag are kept together for easy cross-checking
   against the Intel SDM EFLAGS layout. */

/* O -- overflow, bit 11 */
#define AMD64G_CC_SHIFT_O   11
#define AMD64G_CC_MASK_O    (1ULL << AMD64G_CC_SHIFT_O)

/* S -- sign, bit 7 */
#define AMD64G_CC_SHIFT_S   7
#define AMD64G_CC_MASK_S    (1ULL << AMD64G_CC_SHIFT_S)

/* Z -- zero, bit 6 */
#define AMD64G_CC_SHIFT_Z   6
#define AMD64G_CC_MASK_Z    (1ULL << AMD64G_CC_SHIFT_Z)

/* A -- auxiliary carry, bit 4 */
#define AMD64G_CC_SHIFT_A   4
#define AMD64G_CC_MASK_A    (1ULL << AMD64G_CC_SHIFT_A)

/* P -- parity, bit 2 */
#define AMD64G_CC_SHIFT_P   2
#define AMD64G_CC_MASK_P    (1ULL << AMD64G_CC_SHIFT_P)

/* C -- carry, bit 0 */
#define AMD64G_CC_SHIFT_C   0
#define AMD64G_CC_MASK_C    (1ULL << AMD64G_CC_SHIFT_C)

/* FPU flag masks: the x87 status-word condition-code bits C0..C3. */

#define AMD64G_FC_SHIFT_C3   14
#define AMD64G_FC_MASK_C3    (1ULL << AMD64G_FC_SHIFT_C3)

#define AMD64G_FC_SHIFT_C2   10
#define AMD64G_FC_MASK_C2    (1ULL << AMD64G_FC_SHIFT_C2)

#define AMD64G_FC_SHIFT_C1   9
#define AMD64G_FC_MASK_C1    (1ULL << AMD64G_FC_SHIFT_C1)

#define AMD64G_FC_SHIFT_C0   8
#define AMD64G_FC_MASK_C0    (1ULL << AMD64G_FC_SHIFT_C0)
sewardj4f9847d2005-07-25 11:58:34 +0000326
sewardjd20c8852005-01-20 20:04:07 +0000327
/* %RFLAGS thunk descriptors.  A four-word thunk is used to record
   details of the most recent flag-setting operation, so the flags can
   be computed later if needed.  It is possible to do this a little
   more efficiently using a 3-word thunk, but that makes it impossible
   to describe the flag data dependencies sufficiently accurately for
   Memcheck.  Hence 4 words are used, with minimal loss of efficiency.

   The four words are:

      CC_OP, which describes the operation.

      CC_DEP1 and CC_DEP2.  These are arguments to the operation.
         We want Memcheck to believe that the resulting flags are
         data-dependent on both CC_DEP1 and CC_DEP2, hence the 
         name DEP.

      CC_NDEP.  This is a 3rd argument to the operation which is
         sometimes needed.  We arrange things so that Memcheck does
         not believe the resulting flags are data-dependent on CC_NDEP
         ("not dependent").

   To make Memcheck believe that (the definedness of) the encoded
   flags depends only on (the definedness of) CC_DEP1 and CC_DEP2
   requires two things:

   (1) In the guest state layout info (amd64guest_layout), CC_OP and
       CC_NDEP are marked as always defined.

   (2) When passing the thunk components to an evaluation function
       (calculate_condition, calculate_eflags, calculate_eflags_c) the
       IRCallee's mcx_mask must be set so as to exclude from
       consideration all passed args except CC_DEP1 and CC_DEP2.

   Strictly speaking only (2) is necessary for correctness.  However,
   (1) helps efficiency in that since (2) means we never ask about the
   definedness of CC_OP or CC_NDEP, we may as well not even bother to
   track their definedness.

   When building the thunk, it is always necessary to write words into
   CC_DEP1 and CC_DEP2, even if those args are not used given the
   CC_OP field (eg, CC_DEP2 is not used if CC_OP is CC_LOGIC1/2/4).
   This is important because otherwise Memcheck could give false
   positives as it does not understand the relationship between the
   CC_OP field and CC_DEP1 and CC_DEP2, and so believes that the 
   definedness of the stored flags always depends on both CC_DEP1 and
   CC_DEP2.

   However, it is only necessary to set CC_NDEP when the CC_OP value
   requires it, because Memcheck ignores CC_NDEP, and the evaluation
   functions do understand the CC_OP fields and will only examine
   CC_NDEP for suitable values of CC_OP.

   A summary of the field usages is:

   Operation          DEP1               DEP2               NDEP
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

   add/sub/mul        first arg          second arg         unused

   adc/sbb            first arg          (second arg)
                                         XOR old_carry      old_carry

   and/or/xor         result             zero               unused

   inc/dec            result             zero               old_carry

   shl/shr/sar        result             subshifted-        unused
                                         result

   rol/ror            result             zero               old_flags

   copy               old_flags          zero               unused.


   Therefore Memcheck will believe the following:

   * add/sub/mul -- definedness of result flags depends on definedness
     of both args.

   * adc/sbb -- definedness of result flags depends on definedness of
     both args and definedness of the old C flag.  Because only two
     DEP fields are available, the old C flag is XOR'd into the second
     arg so that Memcheck sees the data dependency on it.  That means
     the NDEP field must contain a second copy of the old C flag
     so that the evaluation functions can correctly recover the second
     arg.

   * and/or/xor are straightforward -- definedness of result flags
     depends on definedness of result value.

   * inc/dec -- definedness of result flags depends only on
     definedness of result.  This isn't really true -- it also depends
     on the old C flag.  However, we don't want Memcheck to see that,
     and so the old C flag must be passed in NDEP and not in DEP2.
     It's inconceivable that a compiler would generate code that puts
     the C flag in an undefined state, then does an inc/dec, which
     leaves C unchanged, and then makes a conditional jump/move based
     on C.  So our fiction seems a good approximation.

   * shl/shr/sar -- straightforward, again, definedness of result
     flags depends on definedness of result value.  The subshifted
     value (value shifted one less) is also needed, but its
     definedness is the same as the definedness of the shifted value.

   * rol/ror -- these only set O and C, and leave A Z S P alone.
     However it seems prudent (as per inc/dec) to say the definedness
     of all resulting flags depends on the definedness of the result,
     hence the old flags must go in as NDEP and not DEP2.

   * rcl/rcr are too difficult to do in-line, and so are done by a
     helper function.  They are not part of this scheme.  The helper
     function takes the value to be rotated, the rotate amount and the
     old flags, and returns the new flags and the rotated value.
     Since the helper's mcx_mask does not have any set bits, Memcheck
     will lazily propagate undefinedness from any of the 3 args into
     both results (flags and actual value).

   NOTE: the ordering and implicit numbering of these enumerators is
   part of the thunk encoding -- do not reorder or insert entries
   except at the end, before AMD64G_CC_OP_NUMBER.
*/
enum {
    AMD64G_CC_OP_COPY=0, /* DEP1 = current flags, DEP2 = 0, NDEP = unused */
                         /* just copy DEP1 to output */

    AMD64G_CC_OP_ADDB,   /* 1 */
    AMD64G_CC_OP_ADDW,   /* 2 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_ADDL,   /* 3 */
    AMD64G_CC_OP_ADDQ,   /* 4 */

    AMD64G_CC_OP_SUBB,   /* 5 */
    AMD64G_CC_OP_SUBW,   /* 6 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_SUBL,   /* 7 */
    AMD64G_CC_OP_SUBQ,   /* 8 */

    AMD64G_CC_OP_ADCB,   /* 9 */
    AMD64G_CC_OP_ADCW,   /* 10 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
    AMD64G_CC_OP_ADCL,   /* 11 */
    AMD64G_CC_OP_ADCQ,   /* 12 */

    AMD64G_CC_OP_SBBB,   /* 13 */
    AMD64G_CC_OP_SBBW,   /* 14 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
    AMD64G_CC_OP_SBBL,   /* 15 */
    AMD64G_CC_OP_SBBQ,   /* 16 */

    AMD64G_CC_OP_LOGICB, /* 17 */
    AMD64G_CC_OP_LOGICW, /* 18 DEP1 = result, DEP2 = 0, NDEP = unused */
    AMD64G_CC_OP_LOGICL, /* 19 */
    AMD64G_CC_OP_LOGICQ, /* 20 */

    AMD64G_CC_OP_INCB,   /* 21 */
    AMD64G_CC_OP_INCW,   /* 22 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
    AMD64G_CC_OP_INCL,   /* 23 */
    AMD64G_CC_OP_INCQ,   /* 24 */

    AMD64G_CC_OP_DECB,   /* 25 */
    AMD64G_CC_OP_DECW,   /* 26 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
    AMD64G_CC_OP_DECL,   /* 27 */
    AMD64G_CC_OP_DECQ,   /* 28 */

    AMD64G_CC_OP_SHLB,   /* 29 DEP1 = res, DEP2 = res', NDEP = unused */
    AMD64G_CC_OP_SHLW,   /* 30 where res' is like res but shifted one bit less */
    AMD64G_CC_OP_SHLL,   /* 31 */
    AMD64G_CC_OP_SHLQ,   /* 32 */

    AMD64G_CC_OP_SHRB,   /* 33 DEP1 = res, DEP2 = res', NDEP = unused */
    AMD64G_CC_OP_SHRW,   /* 34 where res' is like res but shifted one bit less */
    AMD64G_CC_OP_SHRL,   /* 35 */
    AMD64G_CC_OP_SHRQ,   /* 36 */

    AMD64G_CC_OP_ROLB,   /* 37 */
    AMD64G_CC_OP_ROLW,   /* 38 DEP1 = res, DEP2 = 0, NDEP = old flags */
    AMD64G_CC_OP_ROLL,   /* 39 */
    AMD64G_CC_OP_ROLQ,   /* 40 */

    AMD64G_CC_OP_RORB,   /* 41 */
    AMD64G_CC_OP_RORW,   /* 42 DEP1 = res, DEP2 = 0, NDEP = old flags */
    AMD64G_CC_OP_RORL,   /* 43 */
    AMD64G_CC_OP_RORQ,   /* 44 */

    AMD64G_CC_OP_UMULB,  /* 45 */
    AMD64G_CC_OP_UMULW,  /* 46 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_UMULL,  /* 47 */
    AMD64G_CC_OP_UMULQ,  /* 48 */

    AMD64G_CC_OP_SMULB,  /* 49 */
    AMD64G_CC_OP_SMULW,  /* 50 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_SMULL,  /* 51 */
    AMD64G_CC_OP_SMULQ,  /* 52 */

    AMD64G_CC_OP_ANDN32, /* 53 */
    AMD64G_CC_OP_ANDN64, /* 54 DEP1 = res, DEP2 = 0, NDEP = unused */

    AMD64G_CC_OP_BLSI32, /* 55 */
    AMD64G_CC_OP_BLSI64, /* 56 DEP1 = res, DEP2 = arg, NDEP = unused */

    AMD64G_CC_OP_BLSMSK32,/* 57 */
    AMD64G_CC_OP_BLSMSK64,/* 58 DEP1 = res, DEP2 = arg, NDEP = unused */

    AMD64G_CC_OP_BLSR32, /* 59 */
    AMD64G_CC_OP_BLSR64, /* 60 DEP1 = res, DEP2 = arg, NDEP = unused */

    AMD64G_CC_OP_NUMBER
};
528
/* AMD64 condition codes, numbered to match the low 4 bits of the
   x86/amd64 Jcc / SETcc / CMOVcc opcode encodings.  Codes come in
   complementary pairs: an even code and the following odd code test
   opposite senses of the same predicate. */
typedef
   enum {
      AMD64CondO      = 0,   /* overflow */
      AMD64CondNO     = 1,   /* no overflow */

      AMD64CondB      = 2,   /* below */
      AMD64CondNB     = 3,   /* not below */

      AMD64CondZ      = 4,   /* zero */
      AMD64CondNZ     = 5,   /* not zero */

      AMD64CondBE     = 6,   /* below or equal */
      AMD64CondNBE    = 7,   /* not below or equal */

      AMD64CondS      = 8,   /* negative */
      AMD64CondNS     = 9,   /* not negative */

      AMD64CondP      = 10,  /* parity even */
      AMD64CondNP     = 11,  /* not parity even */

      AMD64CondL      = 12,  /* jump less */
      AMD64CondNL     = 13,  /* not less */

      AMD64CondLE     = 14,  /* less or equal */
      AMD64CondNLE    = 15,  /* not less or equal */

      AMD64CondAlways = 16   /* HACK: not a real condition code */
   }
   AMD64Condcode;
558
#endif /* ndef __VEX_GUEST_AMD64_DEFS_H */

/*---------------------------------------------------------------*/
/*--- end                                  guest_amd64_defs.h ---*/
/*---------------------------------------------------------------*/