blob: 88593e65d61e7ddcf9dc5ff38e189c7a2e2141b9 [file] [log] [blame]
sewardjd20c8852005-01-20 20:04:07 +00001
2/*---------------------------------------------------------------*/
sewardj752f9062010-05-03 21:38:49 +00003/*--- begin guest_amd64_defs.h ---*/
sewardjd20c8852005-01-20 20:04:07 +00004/*---------------------------------------------------------------*/
5
6/*
sewardj752f9062010-05-03 21:38:49 +00007 This file is part of Valgrind, a dynamic binary instrumentation
8 framework.
sewardjd20c8852005-01-20 20:04:07 +00009
Elliott Hughesed398002017-06-21 14:41:24 -070010 Copyright (C) 2004-2017 OpenWorks LLP
sewardj752f9062010-05-03 21:38:49 +000011 info@open-works.net
sewardjd20c8852005-01-20 20:04:07 +000012
sewardj752f9062010-05-03 21:38:49 +000013 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
sewardjd20c8852005-01-20 20:04:07 +000017
sewardj752f9062010-05-03 21:38:49 +000018 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
sewardj7bd6ffe2005-08-03 16:07:36 +000026 02110-1301, USA.
27
sewardj752f9062010-05-03 21:38:49 +000028 The GNU General Public License is contained in the file COPYING.
sewardjd20c8852005-01-20 20:04:07 +000029
30 Neither the names of the U.S. Department of Energy nor the
31 University of California nor the names of its contributors may be
32 used to endorse or promote products derived from this software
33 without prior written permission.
sewardjd20c8852005-01-20 20:04:07 +000034*/
35
36/* Only to be used within the guest-amd64 directory. */
37
sewardjcef7d3e2009-07-02 12:21:59 +000038#ifndef __VEX_GUEST_AMD64_DEFS_H
39#define __VEX_GUEST_AMD64_DEFS_H
sewardjd20c8852005-01-20 20:04:07 +000040
florian58a637b2012-09-30 20:30:17 +000041#include "libvex_basictypes.h"
42#include "libvex_emnote.h" // VexEmNote
43#include "libvex_guest_amd64.h" // VexGuestAMD64State
44#include "guest_generic_bb_to_IR.h" // DisResult
sewardjd20c8852005-01-20 20:04:07 +000045
46/*---------------------------------------------------------*/
47/*--- amd64 to IR conversion ---*/
48/*---------------------------------------------------------*/
49
sewardj9e6491a2005-07-02 19:24:10 +000050/* Convert one amd64 insn to IR. See the type DisOneInstrFn in
Elliott Hughesed398002017-06-21 14:41:24 -070051 guest_generic_bb_to_IR.h. */
sewardjd20c8852005-01-20 20:04:07 +000052extern
sewardjdd40fdf2006-12-24 02:20:24 +000053DisResult disInstr_AMD64 ( IRSB* irbb,
florianbeac5302014-12-31 12:09:38 +000054 Bool (*resteerOkFn) ( void*, Addr ),
sewardj984d9b12010-01-15 10:53:21 +000055 Bool resteerCisOk,
sewardjc716aea2006-01-17 01:48:46 +000056 void* callback_opaque,
florian8462d112014-09-24 15:18:09 +000057 const UChar* guest_code,
sewardj9e6491a2005-07-02 19:24:10 +000058 Long delta,
floriand4cc0de2015-01-02 11:44:12 +000059 Addr guest_IP,
sewardja5f55da2006-04-30 23:37:32 +000060 VexArch guest_arch,
floriancacba8e2014-12-15 18:58:07 +000061 const VexArchInfo* archinfo,
62 const VexAbiInfo* abiinfo,
sewardj9b769162014-07-24 12:42:03 +000063 VexEndness host_endness,
sewardj442e51a2012-12-06 18:08:04 +000064 Bool sigill_diag );
sewardjd20c8852005-01-20 20:04:07 +000065
66/* Used by the optimiser to specialise calls to helpers. */
67extern
florian1ff47562012-10-21 02:09:51 +000068IRExpr* guest_amd64_spechelper ( const HChar* function_name,
sewardjbe917912010-08-22 12:38:53 +000069 IRExpr** args,
70 IRStmt** precedingStmts,
71 Int n_precedingStmts );
sewardjd20c8852005-01-20 20:04:07 +000072
73/* Describes to the optimiser which part of the guest state require
74 precise memory exceptions. This is logically part of the guest
75 state description. */
76extern
sewardjca2c3c72015-02-05 12:53:20 +000077Bool guest_amd64_state_requires_precise_mem_exns ( Int, Int,
78 VexRegisterUpdates );
sewardjd20c8852005-01-20 20:04:07 +000079
80extern
81VexGuestLayout amd64guest_layout;
82
83
84/*---------------------------------------------------------*/
85/*--- amd64 guest helpers ---*/
86/*---------------------------------------------------------*/
87
88/* --- CLEAN HELPERS --- */
89
90extern ULong amd64g_calculate_rflags_all (
91 ULong cc_op,
92 ULong cc_dep1, ULong cc_dep2, ULong cc_ndep
93 );
94
sewardjdf0e0022005-01-25 15:48:43 +000095extern ULong amd64g_calculate_rflags_c (
sewardjd20c8852005-01-20 20:04:07 +000096 ULong cc_op,
97 ULong cc_dep1, ULong cc_dep2, ULong cc_ndep
98 );
99
100extern ULong amd64g_calculate_condition (
101 ULong/*AMD64Condcode*/ cond,
102 ULong cc_op,
103 ULong cc_dep1, ULong cc_dep2, ULong cc_ndep
104 );
105
sewardj4f9847d2005-07-25 11:58:34 +0000106extern ULong amd64g_calculate_FXAM ( ULong tag, ULong dbl );
sewardjd20c8852005-01-20 20:04:07 +0000107
sewardj112b0992005-07-23 13:19:32 +0000108extern ULong amd64g_calculate_RCR (
109 ULong arg, ULong rot_amt, ULong rflags_in, Long sz
110 );
sewardjd20c8852005-01-20 20:04:07 +0000111
sewardjb5e5c6d2007-01-12 20:29:01 +0000112extern ULong amd64g_calculate_RCL (
113 ULong arg, ULong rot_amt, ULong rflags_in, Long sz
114 );
115
sewardj1a179b52010-09-28 19:56:32 +0000116extern ULong amd64g_calculate_pclmul(ULong s1, ULong s2, ULong which);
117
sewardj5e205372005-05-09 02:57:08 +0000118extern ULong amd64g_check_fldcw ( ULong fpucw );
sewardjd20c8852005-01-20 20:04:07 +0000119
sewardj5e205372005-05-09 02:57:08 +0000120extern ULong amd64g_create_fpucw ( ULong fpround );
sewardjd20c8852005-01-20 20:04:07 +0000121
sewardjbcbb9de2005-03-27 02:22:32 +0000122extern ULong amd64g_check_ldmxcsr ( ULong mxcsr );
sewardjd20c8852005-01-20 20:04:07 +0000123
sewardjbcbb9de2005-03-27 02:22:32 +0000124extern ULong amd64g_create_mxcsr ( ULong sseround );
sewardjd20c8852005-01-20 20:04:07 +0000125
florian6ef84be2012-08-26 03:20:07 +0000126extern VexEmNote amd64g_dirtyhelper_FLDENV ( VexGuestAMD64State*, HWord );
127extern VexEmNote amd64g_dirtyhelper_FRSTOR ( VexGuestAMD64State*, HWord );
128extern VexEmNote amd64g_dirtyhelper_FRSTORS ( VexGuestAMD64State*, HWord );
sewardj4017a3b2005-06-13 12:17:27 +0000129
sewardj9ae42a72012-02-16 14:18:56 +0000130extern void amd64g_dirtyhelper_FSTENV ( VexGuestAMD64State*, HWord );
131extern void amd64g_dirtyhelper_FNSAVE ( VexGuestAMD64State*, HWord );
132extern void amd64g_dirtyhelper_FNSAVES ( VexGuestAMD64State*, HWord );
sewardj4017a3b2005-06-13 12:17:27 +0000133
sewardjd20c8852005-01-20 20:04:07 +0000134/* Translate a guest virtual_addr into a guest linear address by
135 consulting the supplied LDT/GDT structures. Their representation
136 must be as specified in pub/libvex_guest_amd64.h. To indicate a
137 translation failure, 1<<32 is returned. On success, the lower 32
138 bits of the returned result indicate the linear address.
139*/
140//extern
141//ULong amd64g_use_seg_selector ( HWord ldt, HWord gdt,
142// UInt seg_selector, UInt virtual_addr );
143
144extern ULong amd64g_calculate_mmx_pmaddwd ( ULong, ULong );
145extern ULong amd64g_calculate_mmx_psadbw ( ULong, ULong );
sewardjd20c8852005-01-20 20:04:07 +0000146
sewardj8cb931e2012-02-16 22:02:14 +0000147extern ULong amd64g_calculate_sse_phminposuw ( ULong sLo, ULong sHi );
148
sewardj186f8692011-01-21 17:51:44 +0000149extern ULong amd64g_calc_crc32b ( ULong crcIn, ULong b );
150extern ULong amd64g_calc_crc32w ( ULong crcIn, ULong w );
151extern ULong amd64g_calc_crc32l ( ULong crcIn, ULong l );
152extern ULong amd64g_calc_crc32q ( ULong crcIn, ULong q );
sewardjd20c8852005-01-20 20:04:07 +0000153
sewardj4d5bce22012-02-21 11:02:44 +0000154extern ULong amd64g_calc_mpsadbw ( ULong sHi, ULong sLo,
155 ULong dHi, ULong dLo,
156 ULong imm_and_return_control_bit );
157
sewardjcc3d2192013-03-27 11:37:33 +0000158extern ULong amd64g_calculate_pext ( ULong, ULong );
159extern ULong amd64g_calculate_pdep ( ULong, ULong );
160
sewardjd20c8852005-01-20 20:04:07 +0000161/* --- DIRTY HELPERS --- */
162
florianbdf99f02015-01-04 17:20:19 +0000163extern ULong amd64g_dirtyhelper_loadF80le ( Addr/*addr*/ );
sewardjd20c8852005-01-20 20:04:07 +0000164
florianbdf99f02015-01-04 17:20:19 +0000165extern void amd64g_dirtyhelper_storeF80le ( Addr/*addr*/, ULong/*data*/ );
sewardjd20c8852005-01-20 20:04:07 +0000166
sewardje9d8a262009-07-01 08:06:34 +0000167extern void amd64g_dirtyhelper_CPUID_baseline ( VexGuestAMD64State* st );
168extern void amd64g_dirtyhelper_CPUID_sse3_and_cx16 ( VexGuestAMD64State* st );
sewardj0b2d3fe2010-08-06 07:59:38 +0000169extern void amd64g_dirtyhelper_CPUID_sse42_and_cx16 ( VexGuestAMD64State* st );
sewardjfe0c5e72012-06-15 15:48:07 +0000170extern void amd64g_dirtyhelper_CPUID_avx_and_cx16 ( VexGuestAMD64State* st );
sewardj70dbeb02015-08-12 11:15:53 +0000171extern void amd64g_dirtyhelper_CPUID_avx2 ( VexGuestAMD64State* st );
sewardjd0a12df2005-02-10 02:07:43 +0000172
sewardj0585a032005-11-05 02:55:06 +0000173extern void amd64g_dirtyhelper_FINIT ( VexGuestAMD64State* );
174
sewardj70dbeb02015-08-12 11:15:53 +0000175extern void amd64g_dirtyhelper_XSAVE_COMPONENT_0
176 ( VexGuestAMD64State* gst, HWord addr );
177extern void amd64g_dirtyhelper_XSAVE_COMPONENT_1_EXCLUDING_XMMREGS
178 ( VexGuestAMD64State* gst, HWord addr );
179
180extern VexEmNote amd64g_dirtyhelper_XRSTOR_COMPONENT_0
181 ( VexGuestAMD64State* gst, HWord addr );
182extern VexEmNote amd64g_dirtyhelper_XRSTOR_COMPONENT_1_EXCLUDING_XMMREGS
183 ( VexGuestAMD64State* gst, HWord addr );
sewardj5abcfe62007-01-10 04:59:33 +0000184
sewardjbc6af532005-08-23 23:16:51 +0000185extern ULong amd64g_dirtyhelper_RDTSC ( void );
sewardj818c7302013-03-26 13:53:18 +0000186extern void amd64g_dirtyhelper_RDTSCP ( VexGuestAMD64State* st );
sewardjbc6af532005-08-23 23:16:51 +0000187
sewardjbb4396c2007-11-20 17:29:08 +0000188extern ULong amd64g_dirtyhelper_IN ( ULong portno, ULong sz/*1,2 or 4*/ );
189extern void amd64g_dirtyhelper_OUT ( ULong portno, ULong data,
190 ULong sz/*1,2 or 4*/ );
191
sewardjb9dc2432010-06-07 16:22:22 +0000192extern void amd64g_dirtyhelper_SxDT ( void* address,
193 ULong op /* 0 or 1 */ );
194
sewardjacfbd7d2010-08-17 22:52:08 +0000195/* Helps with PCMP{I,E}STR{I,M}.
196
197 CALLED FROM GENERATED CODE: DIRTY HELPER(s). (But not really,
198 actually it could be a clean helper, but for the fact that we can't
199 pass by value 2 x V128 to a clean helper, nor have one returned.)
200 Reads guest state, writes to guest state for the xSTRM cases, no
201 accesses of memory, is a pure function.
202
203 opc_and_imm contains (4th byte of opcode << 8) | the-imm8-byte so
204 the callee knows which I/E and I/M variant it is dealing with and
205 what the specific operation is. 4th byte of opcode is in the range
206 0x60 to 0x63:
207 istri 66 0F 3A 63
208 istrm 66 0F 3A 62
209 estri 66 0F 3A 61
210 estrm 66 0F 3A 60
211
212 gstOffL and gstOffR are the guest state offsets for the two XMM
213 register inputs. We never have to deal with the memory case since
214 that is handled by pre-loading the relevant value into the fake
215 XMM16 register.
216
217 For ESTRx variants, edxIN and eaxIN hold the values of those two
218 registers.
219
220 In all cases, the bottom 16 bits of the result contain the new
221 OSZACP %rflags values. For xSTRI variants, bits[31:16] of the
222 result hold the new %ecx value. For xSTRM variants, the helper
223 writes the result directly to the guest XMM0.
224
225 Declarable side effects: in all cases, reads guest state at
226 [gstOffL, +16) and [gstOffR, +16). For xSTRM variants, also writes
227 guest_XMM0.
228
229 Is expected to be called with opc_and_imm combinations which have
230 actually been validated, and will assert if otherwise. The front
231 end should ensure we're only called with verified values.
232*/
233extern ULong amd64g_dirtyhelper_PCMPxSTRx (
234 VexGuestAMD64State*,
235 HWord opc4_and_imm,
236 HWord gstOffL, HWord gstOffR,
237 HWord edxIN, HWord eaxIN
238 );
239
philippeff4d6be2012-02-14 21:34:56 +0000240/* Implementation of intel AES instructions as described in
241 Intel Advanced Vector Extensions
242 Programming Reference
243 MARCH 2008
244 319433-002.
245
246 CALLED FROM GENERATED CODE: DIRTY HELPER(s). (But not really,
247 actually it could be a clean helper, but for the fact that we can't
248 pass by value 2 x V128 to a clean helper, nor have one returned.)
249 Reads guest state, writes to guest state, no
250 accesses of memory, is a pure function.
251
252 opc4 contains the 4th byte of opcode. Front-end should only
253 give opcode corresponding to AESENC/AESENCLAST/AESDEC/AESDECLAST/AESIMC.
254 (will assert otherwise).
255
256 gstOffL and gstOffR are the guest state offsets for the two XMM
sewardj1407a362012-06-24 15:11:38 +0000257 register inputs, gstOffD is the guest state offset for the XMM register
258 output. We never have to deal with the memory case since that is handled
259 by pre-loading the relevant value into the fake XMM16 register.
philippeff4d6be2012-02-14 21:34:56 +0000260
261*/
262extern void amd64g_dirtyhelper_AES (
263 VexGuestAMD64State* gst,
sewardj1407a362012-06-24 15:11:38 +0000264 HWord opc4, HWord gstOffD,
philippeff4d6be2012-02-14 21:34:56 +0000265 HWord gstOffL, HWord gstOffR
266 );
267
268/* Implementation of AESKEYGENASSIST.
269
270 CALLED FROM GENERATED CODE: DIRTY HELPER(s). (But not really,
271 actually it could be a clean helper, but for the fact that we can't
272 pass by value 1 x V128 to a clean helper, nor have one returned.)
273 Reads guest state, writes to guest state, no
274 accesses of memory, is a pure function.
275
276 imm8 is the Round Key constant.
277
278 gstOffL and gstOffR are the guest state offsets for the two XMM
279 register input and output. We never have to deal with the memory case since
280 that is handled by pre-loading the relevant value into the fake
281 XMM16 register.
282
283*/
284extern void amd64g_dirtyhelper_AESKEYGENASSIST (
285 VexGuestAMD64State* gst,
286 HWord imm8,
287 HWord gstOffL, HWord gstOffR
288 );
sewardj0b2d3fe2010-08-06 07:59:38 +0000289
sewardjd20c8852005-01-20 20:04:07 +0000290//extern void amd64g_dirtyhelper_CPUID_sse0 ( VexGuestAMD64State* );
291//extern void amd64g_dirtyhelper_CPUID_sse1 ( VexGuestAMD64State* );
292//extern void amd64g_dirtyhelper_CPUID_sse2 ( VexGuestAMD64State* );
293
294//extern void amd64g_dirtyhelper_FSAVE ( VexGuestAMD64State*, HWord );
295
florian6ef84be2012-08-26 03:20:07 +0000296//extern VexEmNote
sewardjd20c8852005-01-20 20:04:07 +0000297// amd64g_dirtyhelper_FRSTOR ( VexGuestAMD64State*, HWord );
298
299//extern void amd64g_dirtyhelper_FSTENV ( VexGuestAMD64State*, HWord );
300
florian6ef84be2012-08-26 03:20:07 +0000301//extern VexEmNote
sewardjd20c8852005-01-20 20:04:07 +0000302// amd64g_dirtyhelper_FLDENV ( VexGuestAMD64State*, HWord );
303
sewardjd20c8852005-01-20 20:04:07 +0000304
305
306/*---------------------------------------------------------*/
307/*--- Condition code stuff ---*/
308/*---------------------------------------------------------*/
309
/* rflags masks.  Bit positions of the condition-code flags within the
   guest %rflags word, and the corresponding single-bit masks. */
#define AMD64G_CC_SHIFT_O   11   /* overflow */
#define AMD64G_CC_SHIFT_S   7    /* sign */
#define AMD64G_CC_SHIFT_Z   6    /* zero */
#define AMD64G_CC_SHIFT_A   4    /* adjust (aux carry) */
#define AMD64G_CC_SHIFT_C   0    /* carry */
#define AMD64G_CC_SHIFT_P   2    /* parity */

#define AMD64G_CC_MASK_O    (1ULL << AMD64G_CC_SHIFT_O)
#define AMD64G_CC_MASK_S    (1ULL << AMD64G_CC_SHIFT_S)
#define AMD64G_CC_MASK_Z    (1ULL << AMD64G_CC_SHIFT_Z)
#define AMD64G_CC_MASK_A    (1ULL << AMD64G_CC_SHIFT_A)
#define AMD64G_CC_MASK_C    (1ULL << AMD64G_CC_SHIFT_C)
#define AMD64G_CC_MASK_P    (1ULL << AMD64G_CC_SHIFT_P)

/* additional rflags masks */
#define AMD64G_CC_SHIFT_ID  21   /* identification flag */
#define AMD64G_CC_SHIFT_AC  18   /* alignment check */
#define AMD64G_CC_SHIFT_D   10   /* direction */

#define AMD64G_CC_MASK_ID   (1ULL << AMD64G_CC_SHIFT_ID)
#define AMD64G_CC_MASK_AC   (1ULL << AMD64G_CC_SHIFT_AC)
#define AMD64G_CC_MASK_D    (1ULL << AMD64G_CC_SHIFT_D)

/* FPU flag masks: the x87 condition-code bits C0..C3 in the FPU
   status word. */
#define AMD64G_FC_SHIFT_C3  14
#define AMD64G_FC_SHIFT_C2  10
#define AMD64G_FC_SHIFT_C1  9
#define AMD64G_FC_SHIFT_C0  8

#define AMD64G_FC_MASK_C3   (1ULL << AMD64G_FC_SHIFT_C3)
#define AMD64G_FC_MASK_C2   (1ULL << AMD64G_FC_SHIFT_C2)
#define AMD64G_FC_MASK_C1   (1ULL << AMD64G_FC_SHIFT_C1)
#define AMD64G_FC_MASK_C0   (1ULL << AMD64G_FC_SHIFT_C0)
sewardj4f9847d2005-07-25 11:58:34 +0000344
sewardjd20c8852005-01-20 20:04:07 +0000345
346/* %RFLAGS thunk descriptors. A four-word thunk is used to record
347 details of the most recent flag-setting operation, so the flags can
348 be computed later if needed. It is possible to do this a little
349 more efficiently using a 3-word thunk, but that makes it impossible
350 to describe the flag data dependencies sufficiently accurately for
351 Memcheck. Hence 4 words are used, with minimal loss of efficiency.
352
353 The four words are:
354
355 CC_OP, which describes the operation.
356
357 CC_DEP1 and CC_DEP2. These are arguments to the operation.
358 We want Memcheck to believe that the resulting flags are
359 data-dependent on both CC_DEP1 and CC_DEP2, hence the
360 name DEP.
361
362 CC_NDEP. This is a 3rd argument to the operation which is
363 sometimes needed. We arrange things so that Memcheck does
364 not believe the resulting flags are data-dependent on CC_NDEP
365 ("not dependent").
366
367 To make Memcheck believe that (the definedness of) the encoded
368 flags depends only on (the definedness of) CC_DEP1 and CC_DEP2
369 requires two things:
370
371 (1) In the guest state layout info (amd64guest_layout), CC_OP and
372 CC_NDEP are marked as always defined.
373
374 (2) When passing the thunk components to an evaluation function
375 (calculate_condition, calculate_eflags, calculate_eflags_c) the
376 IRCallee's mcx_mask must be set so as to exclude from
377 consideration all passed args except CC_DEP1 and CC_DEP2.
378
379 Strictly speaking only (2) is necessary for correctness. However,
380 (1) helps efficiency in that since (2) means we never ask about the
381 definedness of CC_OP or CC_NDEP, we may as well not even bother to
382 track their definedness.
383
384 When building the thunk, it is always necessary to write words into
385 CC_DEP1 and CC_DEP2, even if those args are not used given the
386 CC_OP field (eg, CC_DEP2 is not used if CC_OP is CC_LOGIC1/2/4).
387 This is important because otherwise Memcheck could give false
388 positives as it does not understand the relationship between the
389 CC_OP field and CC_DEP1 and CC_DEP2, and so believes that the
390 definedness of the stored flags always depends on both CC_DEP1 and
391 CC_DEP2.
392
393 However, it is only necessary to set CC_NDEP when the CC_OP value
394 requires it, because Memcheck ignores CC_NDEP, and the evaluation
395 functions do understand the CC_OP fields and will only examine
396 CC_NDEP for suitable values of CC_OP.
397
398 A summary of the field usages is:
399
400 Operation DEP1 DEP2 NDEP
401 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
402
403 add/sub/mul first arg second arg unused
404
405 adc/sbb first arg (second arg)
406 XOR old_carry old_carry
407
408 and/or/xor result zero unused
409
410 inc/dec result zero old_carry
411
412 shl/shr/sar result subshifted- unused
413 result
414
415 rol/ror result zero old_flags
416
417 copy old_flags zero unused.
418
419
420 Therefore Memcheck will believe the following:
421
422 * add/sub/mul -- definedness of result flags depends on definedness
423 of both args.
424
425 * adc/sbb -- definedness of result flags depends on definedness of
426 both args and definedness of the old C flag. Because only two
427 DEP fields are available, the old C flag is XOR'd into the second
428 arg so that Memcheck sees the data dependency on it. That means
429 the NDEP field must contain a second copy of the old C flag
430 so that the evaluation functions can correctly recover the second
431 arg.
432
433 * and/or/xor are straightforward -- definedness of result flags
434 depends on definedness of result value.
435
436 * inc/dec -- definedness of result flags depends only on
437 definedness of result. This isn't really true -- it also depends
438 on the old C flag. However, we don't want Memcheck to see that,
439 and so the old C flag must be passed in NDEP and not in DEP2.
440 It's inconceivable that a compiler would generate code that puts
441 the C flag in an undefined state, then does an inc/dec, which
442 leaves C unchanged, and then makes a conditional jump/move based
443 on C. So our fiction seems a good approximation.
444
445 * shl/shr/sar -- straightforward, again, definedness of result
446 flags depends on definedness of result value. The subshifted
447 value (value shifted one less) is also needed, but its
448 definedness is the same as the definedness of the shifted value.
449
450 * rol/ror -- these only set O and C, and leave A Z C P alone.
451 However it seems prudent (as per inc/dec) to say the definedness
452 of all resulting flags depends on the definedness of the result,
453 hence the old flags must go in as NDEP and not DEP2.
454
455 * rcl/rcr are too difficult to do in-line, and so are done by a
456 helper function. They are not part of this scheme. The helper
457 function takes the value to be rotated, the rotate amount and the
458 old flags, and returns the new flags and the rotated value.
459 Since the helper's mcx_mask does not have any set bits, Memcheck
460 will lazily propagate undefinedness from any of the 3 args into
461 both results (flags and actual value).
462*/
/* Values for the CC_OP field of the %rflags thunk (see the big
   comment above).  The comments give the DEP1/DEP2/NDEP usage for
   each operation group; each group has B/W/L/Q (8/16/32/64-bit)
   variants in consecutive slots. */
enum {
   AMD64G_CC_OP_COPY = 0, /* DEP1 = current flags, DEP2 = 0, NDEP = unused
                             just copy DEP1 to output */

   AMD64G_CC_OP_ADDB,     /* 1 */
   AMD64G_CC_OP_ADDW,     /* 2  DEP1 = argL, DEP2 = argR, NDEP = unused */
   AMD64G_CC_OP_ADDL,     /* 3 */
   AMD64G_CC_OP_ADDQ,     /* 4 */

   AMD64G_CC_OP_SUBB,     /* 5 */
   AMD64G_CC_OP_SUBW,     /* 6  DEP1 = argL, DEP2 = argR, NDEP = unused */
   AMD64G_CC_OP_SUBL,     /* 7 */
   AMD64G_CC_OP_SUBQ,     /* 8 */

   AMD64G_CC_OP_ADCB,     /* 9 */
   AMD64G_CC_OP_ADCW,     /* 10 DEP1 = argL, DEP2 = argR ^ oldCarry,
                                 NDEP = oldCarry */
   AMD64G_CC_OP_ADCL,     /* 11 */
   AMD64G_CC_OP_ADCQ,     /* 12 */

   AMD64G_CC_OP_SBBB,     /* 13 */
   AMD64G_CC_OP_SBBW,     /* 14 DEP1 = argL, DEP2 = argR ^ oldCarry,
                                 NDEP = oldCarry */
   AMD64G_CC_OP_SBBL,     /* 15 */
   AMD64G_CC_OP_SBBQ,     /* 16 */

   AMD64G_CC_OP_LOGICB,   /* 17 */
   AMD64G_CC_OP_LOGICW,   /* 18 DEP1 = result, DEP2 = 0, NDEP = unused */
   AMD64G_CC_OP_LOGICL,   /* 19 */
   AMD64G_CC_OP_LOGICQ,   /* 20 */

   AMD64G_CC_OP_INCB,     /* 21 */
   AMD64G_CC_OP_INCW,     /* 22 DEP1 = result, DEP2 = 0,
                                 NDEP = oldCarry (0 or 1) */
   AMD64G_CC_OP_INCL,     /* 23 */
   AMD64G_CC_OP_INCQ,     /* 24 */

   AMD64G_CC_OP_DECB,     /* 25 */
   AMD64G_CC_OP_DECW,     /* 26 DEP1 = result, DEP2 = 0,
                                 NDEP = oldCarry (0 or 1) */
   AMD64G_CC_OP_DECL,     /* 27 */
   AMD64G_CC_OP_DECQ,     /* 28 */

   AMD64G_CC_OP_SHLB,     /* 29 DEP1 = res, DEP2 = res', NDEP = unused */
   AMD64G_CC_OP_SHLW,     /* 30 where res' is like res but shifted one
                                 bit less */
   AMD64G_CC_OP_SHLL,     /* 31 */
   AMD64G_CC_OP_SHLQ,     /* 32 */

   AMD64G_CC_OP_SHRB,     /* 33 DEP1 = res, DEP2 = res', NDEP = unused */
   AMD64G_CC_OP_SHRW,     /* 34 where res' is like res but shifted one
                                 bit less */
   AMD64G_CC_OP_SHRL,     /* 35 */
   AMD64G_CC_OP_SHRQ,     /* 36 */

   AMD64G_CC_OP_ROLB,     /* 37 */
   AMD64G_CC_OP_ROLW,     /* 38 DEP1 = res, DEP2 = 0, NDEP = old flags */
   AMD64G_CC_OP_ROLL,     /* 39 */
   AMD64G_CC_OP_ROLQ,     /* 40 */

   AMD64G_CC_OP_RORB,     /* 41 */
   AMD64G_CC_OP_RORW,     /* 42 DEP1 = res, DEP2 = 0, NDEP = old flags */
   AMD64G_CC_OP_RORL,     /* 43 */
   AMD64G_CC_OP_RORQ,     /* 44 */

   AMD64G_CC_OP_UMULB,    /* 45 */
   AMD64G_CC_OP_UMULW,    /* 46 DEP1 = argL, DEP2 = argR, NDEP = unused */
   AMD64G_CC_OP_UMULL,    /* 47 */
   AMD64G_CC_OP_UMULQ,    /* 48 */

   AMD64G_CC_OP_SMULB,    /* 49 */
   AMD64G_CC_OP_SMULW,    /* 50 DEP1 = argL, DEP2 = argR, NDEP = unused */
   AMD64G_CC_OP_SMULL,    /* 51 */
   AMD64G_CC_OP_SMULQ,    /* 52 */

   AMD64G_CC_OP_ANDN32,   /* 53 */
   AMD64G_CC_OP_ANDN64,   /* 54 DEP1 = res, DEP2 = 0, NDEP = unused */

   AMD64G_CC_OP_BLSI32,   /* 55 */
   AMD64G_CC_OP_BLSI64,   /* 56 DEP1 = res, DEP2 = arg, NDEP = unused */

   AMD64G_CC_OP_BLSMSK32, /* 57 */
   AMD64G_CC_OP_BLSMSK64, /* 58 DEP1 = res, DEP2 = arg, NDEP = unused */

   AMD64G_CC_OP_BLSR32,   /* 59 */
   AMD64G_CC_OP_BLSR64,   /* 60 DEP1 = res, DEP2 = arg, NDEP = unused */

   AMD64G_CC_OP_ADCX32,   /* 61 DEP1 = argL, DEP2 = argR ^ oldCarry, .. */
   AMD64G_CC_OP_ADCX64,   /* 62 .. NDEP = old flags */

   AMD64G_CC_OP_ADOX32,   /* 63 DEP1 = argL, DEP2 = argR ^ oldOverflow, .. */
   AMD64G_CC_OP_ADOX64,   /* 64 .. NDEP = old flags */

   AMD64G_CC_OP_NUMBER
};
552
/* Condition codes for amd64 conditional operations.  Values 0..15 come
   in complementary pairs (cc, not-cc); AMD64CondAlways is an extra
   marker used internally. */
typedef
   enum {
      AMD64CondO      = 0,  /* overflow */
      AMD64CondNO     = 1,  /* no overflow */

      AMD64CondB      = 2,  /* below */
      AMD64CondNB     = 3,  /* not below */

      AMD64CondZ      = 4,  /* zero */
      AMD64CondNZ     = 5,  /* not zero */

      AMD64CondBE     = 6,  /* below or equal */
      AMD64CondNBE    = 7,  /* not below or equal */

      AMD64CondS      = 8,  /* negative */
      AMD64CondNS     = 9,  /* not negative */

      AMD64CondP      = 10, /* parity even */
      AMD64CondNP     = 11, /* not parity even */

      AMD64CondL      = 12, /* less */
      AMD64CondNL     = 13, /* not less */

      AMD64CondLE     = 14, /* less or equal */
      AMD64CondNLE    = 15, /* not less or equal */

      AMD64CondAlways = 16  /* HACK */
   }
   AMD64Condcode;
582
sewardjcef7d3e2009-07-02 12:21:59 +0000583#endif /* ndef __VEX_GUEST_AMD64_DEFS_H */
sewardjd20c8852005-01-20 20:04:07 +0000584
585/*---------------------------------------------------------------*/
sewardjcef7d3e2009-07-02 12:21:59 +0000586/*--- end guest_amd64_defs.h ---*/
sewardjd20c8852005-01-20 20:04:07 +0000587/*---------------------------------------------------------------*/