blob: e3d23f0a14b67739449ada3455c58dce33e75c2f [file] [log] [blame]
/*---------------------------------------------------------------*/
/*--- begin                                 guest_amd64_defs.h ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2013 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/
35
36/* Only to be used within the guest-amd64 directory. */
37
sewardjcef7d3e2009-07-02 12:21:59 +000038#ifndef __VEX_GUEST_AMD64_DEFS_H
39#define __VEX_GUEST_AMD64_DEFS_H
sewardjd20c8852005-01-20 20:04:07 +000040
florian58a637b2012-09-30 20:30:17 +000041#include "libvex_basictypes.h"
42#include "libvex_emnote.h" // VexEmNote
43#include "libvex_guest_amd64.h" // VexGuestAMD64State
44#include "guest_generic_bb_to_IR.h" // DisResult
sewardjd20c8852005-01-20 20:04:07 +000045
46/*---------------------------------------------------------*/
47/*--- amd64 to IR conversion ---*/
48/*---------------------------------------------------------*/
49
/* Convert one amd64 insn to IR.  See the type DisOneInstrFn in
   guest_generic_bb_to_IR.h for the full parameter contract.
   NOTE(review): parameter meanings below follow the DisOneInstrFn
   convention -- confirm against guest_generic_bb_to_IR.h:
     irbb            -- IR superblock under construction; IR is appended
     resteerOkFn     -- callback: may the front end chase ("resteer")
                        a branch to the given guest address?
     resteerCisOk    -- is call/return-style resteering permitted?
     callback_opaque -- opaque value handed back through resteerOkFn
     guest_code      -- the guest bytes being disassembled
     delta           -- offset of this insn within guest_code
     guest_IP        -- guest address of this insn
     guest_arch      -- must denote amd64
     archinfo        -- CPU capability information
     abiinfo         -- ABI-specific information
     host_endness    -- endianness of the host
     sigill_diag     -- emit a diagnostic when an undecodable insn
                        (SIGILL) is encountered? */
extern
DisResult disInstr_AMD64 ( IRSB*        irbb,
                           Bool         (*resteerOkFn) ( void*, Addr64 ),
                           Bool         resteerCisOk,
                           void*        callback_opaque,
                           const UChar* guest_code,
                           Long         delta,
                           Addr64       guest_IP,
                           VexArch      guest_arch,
                           VexArchInfo* archinfo,
                           VexAbiInfo*  abiinfo,
                           VexEndness   host_endness,
                           Bool         sigill_diag );
sewardjd20c8852005-01-20 20:04:07 +000065
/* Used by the optimiser to specialise calls to helpers.  Given the
   name of a flags helper and the IR arguments of a call to it (plus
   the statements preceding the call, for context), produces a
   simplified replacement expression.  NOTE(review): presumably
   returns NULL when no specialisation applies -- confirm in the
   definition. */
extern
IRExpr* guest_amd64_spechelper ( const HChar* function_name,
                                 IRExpr**     args,
                                 IRStmt**     precedingStmts,
                                 Int          n_precedingStmts );
sewardjd20c8852005-01-20 20:04:07 +000072
/* Describes to the optimiser which part of the guest state require
   precise memory exceptions.  This is logically part of the guest
   state description.  The two Ints delimit the guest-state byte
   range being asked about -- NOTE(review): assumed [minoff, maxoff];
   confirm against the definition. */
extern
Bool guest_amd64_state_requires_precise_mem_exns ( Int, Int );

/* Layout descriptor for the amd64 guest state (defined in the
   corresponding .c file). */
extern
VexGuestLayout amd64guest_layout;
81
82
83/*---------------------------------------------------------*/
84/*--- amd64 guest helpers ---*/
85/*---------------------------------------------------------*/
86
/* --- CLEAN HELPERS --- */

/* Evaluate the full OSZACP %rflags set from the 4-word flags thunk
   (cc_op/cc_dep1/cc_dep2/cc_ndep -- see the big thunk comment further
   down this file for the encoding scheme). */
extern ULong amd64g_calculate_rflags_all (
                ULong cc_op,
                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep
             );

/* As above, but computes only the carry flag. */
extern ULong amd64g_calculate_rflags_c (
                ULong cc_op,
                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep
             );

/* Evaluate condition `cond` (an AMD64Condcode) from the flags thunk. */
extern ULong amd64g_calculate_condition (
                ULong/*AMD64Condcode*/ cond,
                ULong cc_op,
                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep
             );

/* Helper for the x87 FXAM insn.  NOTE(review): presumably returns the
   resulting C3..C0 bits for the given tag and f64 image -- confirm. */
extern ULong amd64g_calculate_FXAM ( ULong tag, ULong dbl );

/* Rotate-through-carry helpers (rcl/rcr are done out of line; per the
   thunk comment below they take the value, rotate amount and old
   rflags, and return the new rflags and the rotated value). */
extern ULong amd64g_calculate_RCR (
                ULong arg, ULong rot_amt, ULong rflags_in, Long sz
             );

extern ULong amd64g_calculate_RCL (
                ULong arg, ULong rot_amt, ULong rflags_in, Long sz
             );

/* Helper for PCLMULQDQ (carry-less multiply); `which` selects the
   operand halves -- NOTE(review): exact selector semantics are in the
   definition. */
extern ULong amd64g_calculate_pclmul ( ULong s1, ULong s2, ULong which );

/* x87 control-word check/convert helpers (FLDCW / FSTCW support). */
extern ULong amd64g_check_fldcw ( ULong fpucw );

extern ULong amd64g_create_fpucw ( ULong fpround );

/* SSE MXCSR check/convert helpers (LDMXCSR / STMXCSR support). */
extern ULong amd64g_check_ldmxcsr ( ULong mxcsr );

extern ULong amd64g_create_mxcsr ( ULong sseround );

/* Load x87 environment/state from guest memory at the given address;
   the VexEmNote return reports any emulation warning. */
extern VexEmNote amd64g_dirtyhelper_FLDENV  ( VexGuestAMD64State*, HWord );
extern VexEmNote amd64g_dirtyhelper_FRSTOR  ( VexGuestAMD64State*, HWord );
extern VexEmNote amd64g_dirtyhelper_FRSTORS ( VexGuestAMD64State*, HWord );

/* Store x87 environment/state to guest memory at the given address. */
extern void amd64g_dirtyhelper_FSTENV  ( VexGuestAMD64State*, HWord );
extern void amd64g_dirtyhelper_FNSAVE  ( VexGuestAMD64State*, HWord );
extern void amd64g_dirtyhelper_FNSAVES ( VexGuestAMD64State*, HWord );

/* Translate a guest virtual_addr into a guest linear address by
   consulting the supplied LDT/GDT structures.  Their representation
   must be as specified in pub/libvex_guest_amd64.h.  To indicate a
   translation failure, 1<<32 is returned.  On success, the lower 32
   bits of the returned result indicate the linear address.
*/
//extern
//ULong amd64g_use_seg_selector ( HWord ldt, HWord gdt,
//                                UInt seg_selector, UInt virtual_addr );

/* MMX PMADDWD / PSADBW on two 64-bit lane groups. */
extern ULong amd64g_calculate_mmx_pmaddwd ( ULong, ULong );
extern ULong amd64g_calculate_mmx_psadbw  ( ULong, ULong );

/* SSE4.1 PHMINPOSUW on a 128-bit value passed as two 64-bit halves. */
extern ULong amd64g_calculate_sse_phminposuw ( ULong sLo, ULong sHi );

/* SSE4.2 CRC32 accumulation over a byte/word/dword/qword. */
extern ULong amd64g_calc_crc32b ( ULong crcIn, ULong b );
extern ULong amd64g_calc_crc32w ( ULong crcIn, ULong w );
extern ULong amd64g_calc_crc32l ( ULong crcIn, ULong l );
extern ULong amd64g_calc_crc32q ( ULong crcIn, ULong q );

/* SSE4.1 MPSADBW; the last argument packs the imm8 control bits and a
   result-half selector -- NOTE(review): exact packing is in the
   definition. */
extern ULong amd64g_calc_mpsadbw ( ULong sHi, ULong sLo,
                                   ULong dHi, ULong dLo,
                                   ULong imm_and_return_control_bit );

/* BMI2 PEXT / PDEP bit scatter/gather. */
extern ULong amd64g_calculate_pext ( ULong, ULong );
extern ULong amd64g_calculate_pdep ( ULong, ULong );
159
/* --- DIRTY HELPERS --- */

/* Load/store an 80-bit x87 extended-real at the given guest address.
   NOTE(review): the ULong value is presumably an f64 image converted
   to/from the in-memory f80 -- confirm in the definition. */
extern ULong amd64g_dirtyhelper_loadF80le ( ULong/*addr*/ );

extern void amd64g_dirtyhelper_storeF80le ( ULong/*addr*/, ULong/*data*/ );

/* CPUID emulations at several capability levels; each fills the
   guest GPRs (rax/rbx/rcx/rdx) with the appropriate results. */
extern void amd64g_dirtyhelper_CPUID_baseline       ( VexGuestAMD64State* st );
extern void amd64g_dirtyhelper_CPUID_sse3_and_cx16  ( VexGuestAMD64State* st );
extern void amd64g_dirtyhelper_CPUID_sse42_and_cx16 ( VexGuestAMD64State* st );
extern void amd64g_dirtyhelper_CPUID_avx_and_cx16   ( VexGuestAMD64State* st );

/* FINIT: reset the guest x87 state to its power-on configuration. */
extern void amd64g_dirtyhelper_FINIT ( VexGuestAMD64State* );

/* FXSAVE/FXRSTOR, except for the XMM registers (which are handled
   inline); the HWord is the guest memory address of the 512-byte
   FXSAVE image. */
extern void amd64g_dirtyhelper_FXSAVE_ALL_EXCEPT_XMM
               ( VexGuestAMD64State*, HWord );
extern VexEmNote amd64g_dirtyhelper_FXRSTOR_ALL_EXCEPT_XMM
               ( VexGuestAMD64State*, HWord );

/* RDTSC/RDTSCP emulations; RDTSCP also updates guest registers,
   hence the state pointer. */
extern ULong amd64g_dirtyhelper_RDTSC ( void );
extern void  amd64g_dirtyhelper_RDTSCP ( VexGuestAMD64State* st );

/* Port I/O (IN/OUT) with an operand size of 1, 2 or 4 bytes. */
extern ULong amd64g_dirtyhelper_IN  ( ULong portno, ULong sz/*1,2 or 4*/ );
extern void  amd64g_dirtyhelper_OUT ( ULong portno, ULong data,
                                      ULong sz/*1,2 or 4*/ );

/* SIDT/SGDT support; `op` (0 or 1) selects which table is described.
   NOTE(review): which value maps to which insn is in the definition. */
extern void amd64g_dirtyhelper_SxDT ( void* address,
                                      ULong op /* 0 or 1 */ );
187
/* Helps with PCMP{I,E}STR{I,M}.

   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (But not really,
   actually it could be a clean helper, but for the fact that we can't
   pass by value 2 x V128 to a clean helper, nor have one returned.)
   Reads guest state, writes to guest state for the xSTRM cases, no
   accesses of memory, is a pure function.

   opc4_and_imm contains (4th byte of opcode << 8) | the-imm8-byte so
   the callee knows which I/E and I/M variant it is dealing with and
   what the specific operation is.  4th byte of opcode is in the range
   0x60 to 0x63:
       istri  66 0F 3A 63
       istrm  66 0F 3A 62
       estri  66 0F 3A 61
       estrm  66 0F 3A 60

   gstOffL and gstOffR are the guest state offsets for the two XMM
   register inputs.  We never have to deal with the memory case since
   that is handled by pre-loading the relevant value into the fake
   XMM16 register.

   For ESTRx variants, edxIN and eaxIN hold the values of those two
   registers.

   In all cases, the bottom 16 bits of the result contain the new
   OSZACP %rflags values.  For xSTRI variants, bits[31:16] of the
   result hold the new %ecx value.  For xSTRM variants, the helper
   writes the result directly to the guest XMM0.

   Declarable side effects: in all cases, reads guest state at
   [gstOffL, +16) and [gstOffR, +16).  For xSTRM variants, also writes
   guest_XMM0.

   Is expected to be called with opc4_and_imm combinations which have
   actually been validated, and will assert if otherwise.  The front
   end should ensure we're only called with verified values.
*/
extern ULong amd64g_dirtyhelper_PCMPxSTRx (
          VexGuestAMD64State*,
          HWord opc4_and_imm,
          HWord gstOffL, HWord gstOffR,
          HWord edxIN, HWord eaxIN
       );
232
/* Implementation of intel AES instructions as described in
   Intel Advanced Vector Extensions
   Programming Reference
   MARCH 2008
   319433-002.

   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (But not really,
   actually it could be a clean helper, but for the fact that we can't
   pass by value 2 x V128 to a clean helper, nor have one returned.)
   Reads guest state, writes to guest state, no
   accesses of memory, is a pure function.

   opc4 contains the 4th byte of opcode.  Front-end should only
   give opcode corresponding to AESENC/AESENCLAST/AESDEC/AESDECLAST/AESIMC.
   (will assert otherwise).

   gstOffL and gstOffR are the guest state offsets for the two XMM
   register inputs, gstOffD is the guest state offset for the XMM register
   output.  We never have to deal with the memory case since that is handled
   by pre-loading the relevant value into the fake XMM16 register.
*/
extern void amd64g_dirtyhelper_AES (
          VexGuestAMD64State* gst,
          HWord opc4, HWord gstOffD,
          HWord gstOffL, HWord gstOffR
       );
260
/* Implementation of AESKEYGENASSIST.

   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (But not really,
   actually it could be a clean helper, but for the fact that we can't
   pass by value 1 x V128 to a clean helper, nor have one returned.)
   Reads guest state, writes to guest state, no
   accesses of memory, is a pure function.

   imm8 is the Round Key constant.

   gstOffL and gstOffR are the guest state offsets for the two XMM
   register input and output.  We never have to deal with the memory case
   since that is handled by pre-loading the relevant value into the fake
   XMM16 register.
*/
extern void amd64g_dirtyhelper_AESKEYGENASSIST (
          VexGuestAMD64State* gst,
          HWord imm8,
          HWord gstOffL, HWord gstOffR
       );
sewardj0b2d3fe2010-08-06 07:59:38 +0000282
sewardjd20c8852005-01-20 20:04:07 +0000283//extern void amd64g_dirtyhelper_CPUID_sse0 ( VexGuestAMD64State* );
284//extern void amd64g_dirtyhelper_CPUID_sse1 ( VexGuestAMD64State* );
285//extern void amd64g_dirtyhelper_CPUID_sse2 ( VexGuestAMD64State* );
286
287//extern void amd64g_dirtyhelper_FSAVE ( VexGuestAMD64State*, HWord );
288
florian6ef84be2012-08-26 03:20:07 +0000289//extern VexEmNote
sewardjd20c8852005-01-20 20:04:07 +0000290// amd64g_dirtyhelper_FRSTOR ( VexGuestAMD64State*, HWord );
291
292//extern void amd64g_dirtyhelper_FSTENV ( VexGuestAMD64State*, HWord );
293
florian6ef84be2012-08-26 03:20:07 +0000294//extern VexEmNote
sewardjd20c8852005-01-20 20:04:07 +0000295// amd64g_dirtyhelper_FLDENV ( VexGuestAMD64State*, HWord );
296
sewardjd20c8852005-01-20 20:04:07 +0000297
298
299/*---------------------------------------------------------*/
300/*--- Condition code stuff ---*/
301/*---------------------------------------------------------*/
302
/* rflags masks: bit positions of the six arithmetic condition flags
   within %rflags, listed O S Z A P C, plus the matching one-bit
   masks derived from them. */
#define AMD64G_CC_SHIFT_O   11   /* overflow            */
#define AMD64G_CC_SHIFT_S   7    /* sign                */
#define AMD64G_CC_SHIFT_Z   6    /* zero                */
#define AMD64G_CC_SHIFT_A   4    /* adjust (aux carry)  */
#define AMD64G_CC_SHIFT_P   2    /* parity              */
#define AMD64G_CC_SHIFT_C   0    /* carry               */

#define AMD64G_CC_MASK_O   (1ULL << AMD64G_CC_SHIFT_O)
#define AMD64G_CC_MASK_S   (1ULL << AMD64G_CC_SHIFT_S)
#define AMD64G_CC_MASK_Z   (1ULL << AMD64G_CC_SHIFT_Z)
#define AMD64G_CC_MASK_A   (1ULL << AMD64G_CC_SHIFT_A)
#define AMD64G_CC_MASK_P   (1ULL << AMD64G_CC_SHIFT_P)
#define AMD64G_CC_MASK_C   (1ULL << AMD64G_CC_SHIFT_C)

/* FPU flag masks: x87 status-word condition-code bits C3..C0 and
   their one-bit masks. */
#define AMD64G_FC_SHIFT_C3   14
#define AMD64G_FC_SHIFT_C2   10
#define AMD64G_FC_SHIFT_C1   9
#define AMD64G_FC_SHIFT_C0   8

#define AMD64G_FC_MASK_C3   (1ULL << AMD64G_FC_SHIFT_C3)
#define AMD64G_FC_MASK_C2   (1ULL << AMD64G_FC_SHIFT_C2)
#define AMD64G_FC_MASK_C1   (1ULL << AMD64G_FC_SHIFT_C1)
#define AMD64G_FC_MASK_C0   (1ULL << AMD64G_FC_SHIFT_C0)
sewardj4f9847d2005-07-25 11:58:34 +0000328
sewardjd20c8852005-01-20 20:04:07 +0000329
330/* %RFLAGS thunk descriptors. A four-word thunk is used to record
331 details of the most recent flag-setting operation, so the flags can
332 be computed later if needed. It is possible to do this a little
333 more efficiently using a 3-word thunk, but that makes it impossible
334 to describe the flag data dependencies sufficiently accurately for
335 Memcheck. Hence 4 words are used, with minimal loss of efficiency.
336
337 The four words are:
338
339 CC_OP, which describes the operation.
340
341 CC_DEP1 and CC_DEP2. These are arguments to the operation.
342 We want Memcheck to believe that the resulting flags are
343 data-dependent on both CC_DEP1 and CC_DEP2, hence the
344 name DEP.
345
346 CC_NDEP. This is a 3rd argument to the operation which is
347 sometimes needed. We arrange things so that Memcheck does
348 not believe the resulting flags are data-dependent on CC_NDEP
349 ("not dependent").
350
351 To make Memcheck believe that (the definedness of) the encoded
352 flags depends only on (the definedness of) CC_DEP1 and CC_DEP2
353 requires two things:
354
355 (1) In the guest state layout info (amd64guest_layout), CC_OP and
356 CC_NDEP are marked as always defined.
357
358 (2) When passing the thunk components to an evaluation function
359 (calculate_condition, calculate_eflags, calculate_eflags_c) the
360 IRCallee's mcx_mask must be set so as to exclude from
361 consideration all passed args except CC_DEP1 and CC_DEP2.
362
363 Strictly speaking only (2) is necessary for correctness. However,
364 (1) helps efficiency in that since (2) means we never ask about the
365 definedness of CC_OP or CC_NDEP, we may as well not even bother to
366 track their definedness.
367
368 When building the thunk, it is always necessary to write words into
369 CC_DEP1 and CC_DEP2, even if those args are not used given the
   CC_OP field (eg, CC_DEP2 is not used if CC_OP is CC_LOGICB/W/L/Q).
371 This is important because otherwise Memcheck could give false
372 positives as it does not understand the relationship between the
373 CC_OP field and CC_DEP1 and CC_DEP2, and so believes that the
374 definedness of the stored flags always depends on both CC_DEP1 and
375 CC_DEP2.
376
377 However, it is only necessary to set CC_NDEP when the CC_OP value
378 requires it, because Memcheck ignores CC_NDEP, and the evaluation
379 functions do understand the CC_OP fields and will only examine
380 CC_NDEP for suitable values of CC_OP.
381
382 A summary of the field usages is:
383
384 Operation DEP1 DEP2 NDEP
385 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
386
387 add/sub/mul first arg second arg unused
388
389 adc/sbb first arg (second arg)
390 XOR old_carry old_carry
391
392 and/or/xor result zero unused
393
394 inc/dec result zero old_carry
395
396 shl/shr/sar result subshifted- unused
397 result
398
399 rol/ror result zero old_flags
400
401 copy old_flags zero unused.
402
403
404 Therefore Memcheck will believe the following:
405
406 * add/sub/mul -- definedness of result flags depends on definedness
407 of both args.
408
409 * adc/sbb -- definedness of result flags depends on definedness of
410 both args and definedness of the old C flag. Because only two
411 DEP fields are available, the old C flag is XOR'd into the second
412 arg so that Memcheck sees the data dependency on it. That means
413 the NDEP field must contain a second copy of the old C flag
414 so that the evaluation functions can correctly recover the second
415 arg.
416
417 * and/or/xor are straightforward -- definedness of result flags
418 depends on definedness of result value.
419
420 * inc/dec -- definedness of result flags depends only on
421 definedness of result. This isn't really true -- it also depends
422 on the old C flag. However, we don't want Memcheck to see that,
423 and so the old C flag must be passed in NDEP and not in DEP2.
424 It's inconceivable that a compiler would generate code that puts
425 the C flag in an undefined state, then does an inc/dec, which
426 leaves C unchanged, and then makes a conditional jump/move based
427 on C. So our fiction seems a good approximation.
428
429 * shl/shr/sar -- straightforward, again, definedness of result
430 flags depends on definedness of result value. The subshifted
431 value (value shifted one less) is also needed, but its
432 definedness is the same as the definedness of the shifted value.
433
   * rol/ror -- these only set O and C, and leave A Z S P alone.
435 However it seems prudent (as per inc/dec) to say the definedness
436 of all resulting flags depends on the definedness of the result,
437 hence the old flags must go in as NDEP and not DEP2.
438
439 * rcl/rcr are too difficult to do in-line, and so are done by a
440 helper function. They are not part of this scheme. The helper
441 function takes the value to be rotated, the rotate amount and the
442 old flags, and returns the new flags and the rotated value.
443 Since the helper's mcx_mask does not have any set bits, Memcheck
444 will lazily propagate undefinedness from any of the 3 args into
445 both results (flags and actual value).
446*/
/* Codes for the CC_OP field of the flags thunk.  Values are given
   explicitly (rather than relying on implicit enumeration plus
   number comments, which can drift) because the back end and the
   flag-evaluation helpers depend on the exact numbering. */
enum {
   AMD64G_CC_OP_COPY   = 0,  /* DEP1 = current flags, DEP2 = 0,
                                NDEP = unused; just copy DEP1 to output */

   /* DEP1 = argL, DEP2 = argR, NDEP = unused */
   AMD64G_CC_OP_ADDB   = 1,
   AMD64G_CC_OP_ADDW   = 2,
   AMD64G_CC_OP_ADDL   = 3,
   AMD64G_CC_OP_ADDQ   = 4,

   /* DEP1 = argL, DEP2 = argR, NDEP = unused */
   AMD64G_CC_OP_SUBB   = 5,
   AMD64G_CC_OP_SUBW   = 6,
   AMD64G_CC_OP_SUBL   = 7,
   AMD64G_CC_OP_SUBQ   = 8,

   /* DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
   AMD64G_CC_OP_ADCB   = 9,
   AMD64G_CC_OP_ADCW   = 10,
   AMD64G_CC_OP_ADCL   = 11,
   AMD64G_CC_OP_ADCQ   = 12,

   /* DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
   AMD64G_CC_OP_SBBB   = 13,
   AMD64G_CC_OP_SBBW   = 14,
   AMD64G_CC_OP_SBBL   = 15,
   AMD64G_CC_OP_SBBQ   = 16,

   /* DEP1 = result, DEP2 = 0, NDEP = unused */
   AMD64G_CC_OP_LOGICB = 17,
   AMD64G_CC_OP_LOGICW = 18,
   AMD64G_CC_OP_LOGICL = 19,
   AMD64G_CC_OP_LOGICQ = 20,

   /* DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
   AMD64G_CC_OP_INCB   = 21,
   AMD64G_CC_OP_INCW   = 22,
   AMD64G_CC_OP_INCL   = 23,
   AMD64G_CC_OP_INCQ   = 24,

   /* DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
   AMD64G_CC_OP_DECB   = 25,
   AMD64G_CC_OP_DECW   = 26,
   AMD64G_CC_OP_DECL   = 27,
   AMD64G_CC_OP_DECQ   = 28,

   /* DEP1 = res, DEP2 = res', NDEP = unused,
      where res' is like res but shifted one bit less */
   AMD64G_CC_OP_SHLB   = 29,
   AMD64G_CC_OP_SHLW   = 30,
   AMD64G_CC_OP_SHLL   = 31,
   AMD64G_CC_OP_SHLQ   = 32,

   /* DEP1 = res, DEP2 = res', NDEP = unused,
      where res' is like res but shifted one bit less */
   AMD64G_CC_OP_SHRB   = 33,
   AMD64G_CC_OP_SHRW   = 34,
   AMD64G_CC_OP_SHRL   = 35,
   AMD64G_CC_OP_SHRQ   = 36,

   /* DEP1 = res, DEP2 = 0, NDEP = old flags */
   AMD64G_CC_OP_ROLB   = 37,
   AMD64G_CC_OP_ROLW   = 38,
   AMD64G_CC_OP_ROLL   = 39,
   AMD64G_CC_OP_ROLQ   = 40,

   /* DEP1 = res, DEP2 = 0, NDEP = old flags */
   AMD64G_CC_OP_RORB   = 41,
   AMD64G_CC_OP_RORW   = 42,
   AMD64G_CC_OP_RORL   = 43,
   AMD64G_CC_OP_RORQ   = 44,

   /* DEP1 = argL, DEP2 = argR, NDEP = unused */
   AMD64G_CC_OP_UMULB  = 45,
   AMD64G_CC_OP_UMULW  = 46,
   AMD64G_CC_OP_UMULL  = 47,
   AMD64G_CC_OP_UMULQ  = 48,

   /* DEP1 = argL, DEP2 = argR, NDEP = unused */
   AMD64G_CC_OP_SMULB  = 49,
   AMD64G_CC_OP_SMULW  = 50,
   AMD64G_CC_OP_SMULL  = 51,
   AMD64G_CC_OP_SMULQ  = 52,

   /* DEP1 = res, DEP2 = 0, NDEP = unused */
   AMD64G_CC_OP_ANDN32 = 53,
   AMD64G_CC_OP_ANDN64 = 54,

   /* DEP1 = res, DEP2 = arg, NDEP = unused */
   AMD64G_CC_OP_BLSI32 = 55,
   AMD64G_CC_OP_BLSI64 = 56,

   /* DEP1 = res, DEP2 = arg, NDEP = unused */
   AMD64G_CC_OP_BLSMSK32 = 57,
   AMD64G_CC_OP_BLSMSK64 = 58,

   /* DEP1 = res, DEP2 = arg, NDEP = unused */
   AMD64G_CC_OP_BLSR32 = 59,
   AMD64G_CC_OP_BLSR64 = 60,

   AMD64G_CC_OP_NUMBER       /* = 61: count of CC_OP codes */
};
530
/* amd64 condition codes, numbered exactly as in the hardware encoding
   of the Jcc/SETcc/CMOVcc condition field; even/odd pairs are a
   condition and its negation.  AMD64CondAlways is an internal
   pseudo-condition, not an architectural one. */
typedef
   enum {
      AMD64CondO      = 0,   /* overflow           */
      AMD64CondNO     = 1,   /* no overflow        */
      AMD64CondB      = 2,   /* below              */
      AMD64CondNB     = 3,   /* not below          */
      AMD64CondZ      = 4,   /* zero               */
      AMD64CondNZ     = 5,   /* not zero           */
      AMD64CondBE     = 6,   /* below or equal     */
      AMD64CondNBE    = 7,   /* not below or equal */
      AMD64CondS      = 8,   /* negative           */
      AMD64CondNS     = 9,   /* not negative       */
      AMD64CondP      = 10,  /* parity even        */
      AMD64CondNP     = 11,  /* not parity even    */
      AMD64CondL      = 12,  /* less               */
      AMD64CondNL     = 13,  /* not less           */
      AMD64CondLE     = 14,  /* less or equal      */
      AMD64CondNLE    = 15,  /* not less or equal  */
      AMD64CondAlways = 16   /* HACK */
   }
   AMD64Condcode;
560
sewardjcef7d3e2009-07-02 12:21:59 +0000561#endif /* ndef __VEX_GUEST_AMD64_DEFS_H */
sewardjd20c8852005-01-20 20:04:07 +0000562
563/*---------------------------------------------------------------*/
sewardjcef7d3e2009-07-02 12:21:59 +0000564/*--- end guest_amd64_defs.h ---*/
sewardjd20c8852005-01-20 20:04:07 +0000565/*---------------------------------------------------------------*/