//===- X86InstrCompiler.td - Compiler Pseudos and Patterns -*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the various pseudo instructions used by the compiler,
// as well as Pat patterns used during instruction selection.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Pattern Matching Support

def GetLo32XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 32 bits.
  return getI32Imm((unsigned)N->getZExtValue());
}]>;

def GetLo8XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 8 bits.
  return getI8Imm((uint8_t)N->getZExtValue());
}]>;


//===----------------------------------------------------------------------===//
// Random Pseudo Instructions.

// PIC base construction.  This expands to code that looks like this:
//     call  $next_inst
//     popl  %destreg
let neverHasSideEffects = 1, isNotDuplicable = 1, Uses = [ESP] in
  def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
                      "", []>;


// ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [ESP, EFLAGS], Uses = [ESP] in {
def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs), (ins i32imm:$amt),
                           "#ADJCALLSTACKDOWN",
                           [(X86callseq_start timm:$amt)]>,
                         Requires<[In32BitMode]>;
def ADJCALLSTACKUP32   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                         Requires<[In32BitMode]>;
}

// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [RSP, EFLAGS], Uses = [RSP] in {
def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt),
                           "#ADJCALLSTACKDOWN",
                           [(X86callseq_start timm:$amt)]>,
                         Requires<[In64BitMode]>;
def ADJCALLSTACKUP64   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                         Requires<[In64BitMode]>;
}


// x86-64 va_start lowering magic.
let usesCustomInserter = 1 in {
def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
                              (outs),
                              (ins GR8:$al,
                                   i64imm:$regsavefi, i64imm:$offset,
                                   variable_ops),
                              "#VASTART_SAVE_XMM_REGS $al, $regsavefi, $offset",
                              [(X86vastart_save_xmm_regs GR8:$al,
                                                         imm:$regsavefi,
                                                         imm:$offset)]>;

// The VAARG_64 pseudo-instruction takes the address of the va_list,
// and places the address of the next argument into a register.
let Defs = [EFLAGS] in
def VAARG_64 : I<0, Pseudo,
                 (outs GR64:$dst),
                 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
                 "#VAARG_64 $dst, $ap, $size, $mode, $align",
                 [(set GR64:$dst,
                    (X86vaarg64 addr:$ap, imm:$size, imm:$mode, imm:$align)),
                  (implicit EFLAGS)]>;

// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
// targets.  These calls are needed to probe the stack when allocating more
// than 4K bytes in one go.  Touching the stack at 4K increments is necessary
// to ensure that the guard pages used by the OS virtual memory manager are
// allocated in correct sequence.
// The main point of having a separate instruction is its extra unmodelled
// effects (compared to an ordinary call), such as the stack pointer change.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
  def WIN_ALLOCA : I<0, Pseudo, (outs), (ins),
                     "# dynamic stack allocation",
                     [(X86WinAlloca)]>;
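
// Illustrative only (a hedged sketch of a typical Win32 expansion, not the
// literal lowering, which is produced later and varies by environment): the
// allocation size travels in EAX and the probe routine is called:
//     movl  $size, %eax
//     calll _chkstk        # walks the guard pages in 4K strides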

// When using segmented stacks these are lowered into instructions which first
// check if the current stacklet has enough free memory. If it does, memory is
// allocated by bumping the stack pointer. Otherwise memory is allocated from
// the heap.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def SEG_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR32:$dst,
                         (X86SegAlloca GR32:$size))]>,
                    Requires<[In32BitMode]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR64:$dst,
                         (X86SegAlloca GR64:$size))]>,
                    Requires<[In64BitMode]>;
}
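
// Conceptually, the custom inserter expands SEG_ALLOCA into a limit check
// (a hedged sketch only; registers and the limit location are illustrative):
//     leal  -size(%esp), %ecx      # candidate new stack pointer
//     cmpl  <stacklet_limit>, %ecx
//     jae   .Lfits                 # enough room: just move %esp
//     ...otherwise call into the runtime to allocate from the heap...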

// The MSVC runtime contains an _ftol2 routine for converting floating-point
// to integer values. It has a strange calling convention: the input is
// popped from the x87 stack, and the return value is given in EDX:EAX. No
// other registers (aside from flags) are touched.
// Microsoft toolchains do not support 80-bit precision, so a WIN_FTOL_80
// variant is unnecessary.

let Defs = [EAX, EDX, EFLAGS], FPForm = SpecialFP in {
  def WIN_FTOL_32 : I<0, Pseudo, (outs), (ins RFP32:$src),
                      "# win32 fptoui",
                      [(X86WinFTOL RFP32:$src)]>,
                    Requires<[In32BitMode]>;

  def WIN_FTOL_64 : I<0, Pseudo, (outs), (ins RFP64:$src),
                      "# win32 fptoui",
                      [(X86WinFTOL RFP64:$src)]>,
                    Requires<[In32BitMode]>;
}
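
// For reference, a use of the _ftol2 convention looks roughly like this
// (sketch only; register behavior as described in the comment above):
//     fldl  <mem>          # argument is passed on the x87 stack
//     calll _ftol2         # callee pops ST(0); integer result in EDX:EAX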

//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN   : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR32:$addr)]>;

}

let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN64   : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
                      "ret\t#eh_return, addr: $addr",
                      [(X86ehret GR64:$addr)]>;

}

//===----------------------------------------------------------------------===//
// Pseudo instructions used by segmented stacks.
//

// This is lowered into a RET instruction by MCInstLower.  We need
// this so that we don't have to have a MachineBasicBlock which ends
// with a RET and also has successors.
let isPseudo = 1 in {
def MORESTACK_RET: I<0, Pseudo, (outs), (ins),
                     "", []>;

// This instruction is lowered to a RET followed by a MOV.  The two
// instructions are not created at a higher level because the verifier
// would then see a MachineBasicBlock ending with a non-terminator.
def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins),
                                  "", []>;
}

//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// Alias instructions that map movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
// FIXME: Set encoding to pseudo.
let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
    isCodeGenOnly = 1 in {
def MOV8r0   : I<0x30, MRMInitReg, (outs GR8 :$dst), (ins), "",
                 [(set GR8:$dst, 0)]>;

// We want to rewrite MOV16r0 in terms of MOV32r0, because it's a smaller
// encoding and avoids a partial-register update sometimes, but doing so
// at isel time interferes with rematerialization in the current register
// allocator. For now, this is rewritten when the instruction is lowered
// to an MCInst.
def MOV16r0   : I<0x31, MRMInitReg, (outs GR16:$dst), (ins),
                  "",
                  [(set GR16:$dst, 0)]>, OpSize;

// FIXME: Set encoding to pseudo.
def MOV32r0  : I<0x31, MRMInitReg, (outs GR32:$dst), (ins), "",
                 [(set GR32:$dst, 0)]>;
}
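
// For example, "MOV32r0 %eax" is ultimately printed as "xorl %eax, %eax":
// two bytes instead of the five of "movl $0, %eax", and it breaks any false
// dependence on the previous value of the register.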

// We want to rewrite MOV64r0 in terms of MOV32r0, because it's sometimes a
// smaller encoding, but doing so at isel time interferes with rematerialization
// in the current register allocator. For now, this is rewritten when the
// instruction is lowered to an MCInst.
// FIXME: AddedComplexity gives this a higher priority than MOV64ri32. Remove
// when we have a better way to specify isel priority.
let Defs = [EFLAGS], isCodeGenOnly=1,
    AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOV64r0   : I<0x31, MRMInitReg, (outs GR64:$dst), (ins), "",
                  [(set GR64:$dst, 0)]>;

// Materialize i64 constant where top 32-bits are zero. This could theoretically
// use MOV32ri with a SUBREG_TO_REG to represent the zero-extension, however
// that would make it more difficult to rematerialize.
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1,
    isCodeGenOnly = 1 in
def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
                        "", [(set GR64:$dst, i64immZExt32:$src)]>;

// Use sbb to materialize carry bit.
let Uses = [EFLAGS], Defs = [EFLAGS], isCodeGenOnly = 1 in {
// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
// However, Pat<> can't replicate the destination reg into the inputs of the
// result.
// FIXME: Change these to have encoding Pseudo when X86MCCodeEmitter replaces
// X86CodeEmitter.
def SETB_C8r : I<0x18, MRMInitReg, (outs GR8:$dst), (ins), "",
                 [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C16r : I<0x19, MRMInitReg, (outs GR16:$dst), (ins), "",
                  [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>,
                OpSize;
def SETB_C32r : I<0x19, MRMInitReg, (outs GR32:$dst), (ins), "",
                  [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C64r : RI<0x19, MRMInitReg, (outs GR64:$dst), (ins), "",
                   [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
} // isCodeGenOnly


def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;

def : Pat<(i16 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i64 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;

// We canonicalize 'setb' to "(and (sbb reg,reg), 1)" in the hope that the and
// will be eliminated and that the sbb can be extended up to a wider type.  When
// this happens, it is great.  However, if we are left with an 8-bit sbb and an
// and, we might as well just match it as a setb.
def : Pat<(and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1),
          (SETBr)>;
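
// SETB_C32r, for instance, prints as "sbbl %eax, %eax": the register becomes
// -1 when the carry flag is set and 0 otherwise, so masking with 1 recovers
// exactly the 0/1 value that a plain 'setb' would have produced.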

// (add OP, SETB) -> (adc OP, 0)
def : Pat<(add (and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR8:$op),
          (ADC8ri GR8:$op, 0)>;
def : Pat<(add (and (i32 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR32:$op),
          (ADC32ri8 GR32:$op, 0)>;
def : Pat<(add (and (i64 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR64:$op),
          (ADC64ri8 GR64:$op, 0)>;

// (sub OP, SETB) -> (sbb OP, 0)
def : Pat<(sub GR8:$op, (and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB8ri GR8:$op, 0)>;
def : Pat<(sub GR32:$op, (and (i32 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB32ri8 GR32:$op, 0)>;
def : Pat<(sub GR64:$op, (and (i64 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB64ri8 GR64:$op, 0)>;

// (sub OP, SETCC_CARRY) -> (adc OP, 0)
def : Pat<(sub GR8:$op, (i8 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC8ri GR8:$op, 0)>;
def : Pat<(sub GR32:$op, (i32 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC32ri8 GR32:$op, 0)>;
def : Pat<(sub GR64:$op, (i64 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC64ri8 GR64:$op, 0)>;
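
// As a concrete effect of the patterns above, code shaped like
// "x += (a < b)" (unsigned, a sketch with a in %eax, b in %ebx, x in %ecx)
// can come out as:
//     cmpl  %ebx, %eax     # sets CF when a < b
//     adcl  $0, %ecx       # folds the carry straight into the add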

//===----------------------------------------------------------------------===//
// String Pseudo Instructions
//
let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
def REP_MOVSB : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
                  [(X86rep_movs i8)]>, REP;
def REP_MOVSW : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
                  [(X86rep_movs i16)]>, REP, OpSize;
def REP_MOVSD : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
                  [(X86rep_movs i32)]>, REP;
}

let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in
def REP_MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
                   [(X86rep_movs i64)]>, REP;


// FIXME: Should use "(X86rep_stos AL)" as the pattern.
let Defs = [ECX,EDI], Uses = [AL,ECX,EDI], isCodeGenOnly = 1 in
def REP_STOSB : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
                  [(X86rep_stos i8)]>, REP;
let Defs = [ECX,EDI], Uses = [AX,ECX,EDI], isCodeGenOnly = 1 in
def REP_STOSW : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
                  [(X86rep_stos i16)]>, REP, OpSize;
let Defs = [ECX,EDI], Uses = [EAX,ECX,EDI], isCodeGenOnly = 1 in
def REP_STOSD : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
                  [(X86rep_stos i32)]>, REP;

let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI], isCodeGenOnly = 1 in
def REP_STOSQ : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
                   [(X86rep_stos i64)]>, REP;


//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//

// ELF TLS Support
// All calls clobber the non-callee saved registers. ESP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [ESP] in
def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLS_addr32",
                   [(X86tlsaddr tls32addr:$sym)]>,
                 Requires<[In32BitMode]>;

// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
            FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [RSP] in
def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLS_addr64",
                   [(X86tlsaddr tls64addr:$sym)]>,
                 Requires<[In64BitMode]>;

// Darwin TLS Support
// For i386, the address of the thunk is passed on the stack; on return, the
// address of the variable is in %eax.  %ecx is trashed during the function
// call.  All other registers are preserved.
let Defs = [EAX, ECX, EFLAGS],
    Uses = [ESP],
    usesCustomInserter = 1 in
def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLSCall_32",
                   [(X86TLSCall addr:$sym)]>,
                 Requires<[In32BitMode]>;

// For x86_64, the address of the thunk is passed in %rdi; on return, the
// address of the variable is in %rax.  All other registers are preserved.
let Defs = [RAX, EFLAGS],
    Uses = [RSP, RDI],
    usesCustomInserter = 1 in
def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLSCall_64",
                   [(X86TLSCall addr:$sym)]>,
                 Requires<[In64BitMode]>;


//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions

// X86 doesn't have 8-bit conditional moves.  Use a custom inserter to
// emit control flow.  An alternative to this is to mark i8 SELECT as Promote;
// however, that requires promoting the operands and can induce additional
// i8 register pressure.
let usesCustomInserter = 1, Uses = [EFLAGS] in {
def CMOV_GR8 : I<0, Pseudo,
                 (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
                 "#CMOV_GR8 PSEUDO!",
                 [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
                                          imm:$cond, EFLAGS))]>;

let Predicates = [NoCMov] in {
def CMOV_GR32 : I<0, Pseudo,
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cond),
                  "#CMOV_GR32* PSEUDO!",
                  [(set GR32:$dst,
                     (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>;
def CMOV_GR16 : I<0, Pseudo,
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cond),
                  "#CMOV_GR16* PSEUDO!",
                  [(set GR16:$dst,
                     (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;
def CMOV_RFP32 : I<0, Pseudo,
                   (outs RFP32:$dst),
                   (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
                   "#CMOV_RFP32 PSEUDO!",
                   [(set RFP32:$dst,
                      (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
                               EFLAGS))]>;
def CMOV_RFP64 : I<0, Pseudo,
                   (outs RFP64:$dst),
                   (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
                   "#CMOV_RFP64 PSEUDO!",
                   [(set RFP64:$dst,
                      (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
                               EFLAGS))]>;
def CMOV_RFP80 : I<0, Pseudo,
                   (outs RFP80:$dst),
                   (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),
                   "#CMOV_RFP80 PSEUDO!",
                   [(set RFP80:$dst,
                      (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,
                               EFLAGS))]>;
} // Predicates = [NoCMov]
} // usesCustomInserter = 1, Uses = [EFLAGS]


//===----------------------------------------------------------------------===//
// Atomic Instruction Pseudo Instructions
//===----------------------------------------------------------------------===//

// Atomic exchange, and, or, xor
let Constraints = "$val = $dst", Defs = [EFLAGS],
    usesCustomInserter = 1 in {

def ATOMAND8 : I<0, Pseudo, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val),
                 "#ATOMAND8 PSEUDO!",
                 [(set GR8:$dst, (atomic_load_and_8 addr:$ptr, GR8:$val))]>;
def ATOMOR8 : I<0, Pseudo, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val),
                "#ATOMOR8 PSEUDO!",
                [(set GR8:$dst, (atomic_load_or_8 addr:$ptr, GR8:$val))]>;
def ATOMXOR8 : I<0, Pseudo, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val),
                 "#ATOMXOR8 PSEUDO!",
                 [(set GR8:$dst, (atomic_load_xor_8 addr:$ptr, GR8:$val))]>;
def ATOMNAND8 : I<0, Pseudo, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val),
                  "#ATOMNAND8 PSEUDO!",
                  [(set GR8:$dst, (atomic_load_nand_8 addr:$ptr, GR8:$val))]>;

def ATOMAND16 : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
                  "#ATOMAND16 PSEUDO!",
                  [(set GR16:$dst, (atomic_load_and_16 addr:$ptr, GR16:$val))]>;
def ATOMOR16 : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
                 "#ATOMOR16 PSEUDO!",
                 [(set GR16:$dst, (atomic_load_or_16 addr:$ptr, GR16:$val))]>;
def ATOMXOR16 : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
                  "#ATOMXOR16 PSEUDO!",
                  [(set GR16:$dst, (atomic_load_xor_16 addr:$ptr, GR16:$val))]>;
def ATOMNAND16 : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
                   "#ATOMNAND16 PSEUDO!",
                   [(set GR16:$dst, (atomic_load_nand_16 addr:$ptr, GR16:$val))]>;
def ATOMMIN16 : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
                  "#ATOMMIN16 PSEUDO!",
                  [(set GR16:$dst, (atomic_load_min_16 addr:$ptr, GR16:$val))]>;
def ATOMMAX16 : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
                  "#ATOMMAX16 PSEUDO!",
                  [(set GR16:$dst, (atomic_load_max_16 addr:$ptr, GR16:$val))]>;
def ATOMUMIN16 : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
                   "#ATOMUMIN16 PSEUDO!",
                   [(set GR16:$dst, (atomic_load_umin_16 addr:$ptr, GR16:$val))]>;
def ATOMUMAX16 : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
                   "#ATOMUMAX16 PSEUDO!",
                   [(set GR16:$dst, (atomic_load_umax_16 addr:$ptr, GR16:$val))]>;


def ATOMAND32 : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
                  "#ATOMAND32 PSEUDO!",
                  [(set GR32:$dst, (atomic_load_and_32 addr:$ptr, GR32:$val))]>;
def ATOMOR32 : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
                 "#ATOMOR32 PSEUDO!",
                 [(set GR32:$dst, (atomic_load_or_32 addr:$ptr, GR32:$val))]>;
def ATOMXOR32 : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
                  "#ATOMXOR32 PSEUDO!",
                  [(set GR32:$dst, (atomic_load_xor_32 addr:$ptr, GR32:$val))]>;
def ATOMNAND32 : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
                   "#ATOMNAND32 PSEUDO!",
                   [(set GR32:$dst, (atomic_load_nand_32 addr:$ptr, GR32:$val))]>;
def ATOMMIN32 : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
                  "#ATOMMIN32 PSEUDO!",
                  [(set GR32:$dst, (atomic_load_min_32 addr:$ptr, GR32:$val))]>;
def ATOMMAX32 : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
                  "#ATOMMAX32 PSEUDO!",
                  [(set GR32:$dst, (atomic_load_max_32 addr:$ptr, GR32:$val))]>;
def ATOMUMIN32 : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
                   "#ATOMUMIN32 PSEUDO!",
                   [(set GR32:$dst, (atomic_load_umin_32 addr:$ptr, GR32:$val))]>;
def ATOMUMAX32 : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
                   "#ATOMUMAX32 PSEUDO!",
                   [(set GR32:$dst, (atomic_load_umax_32 addr:$ptr, GR32:$val))]>;



def ATOMAND64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                  "#ATOMAND64 PSEUDO!",
                  [(set GR64:$dst, (atomic_load_and_64 addr:$ptr, GR64:$val))]>;
def ATOMOR64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                 "#ATOMOR64 PSEUDO!",
                 [(set GR64:$dst, (atomic_load_or_64 addr:$ptr, GR64:$val))]>;
def ATOMXOR64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                  "#ATOMXOR64 PSEUDO!",
                  [(set GR64:$dst, (atomic_load_xor_64 addr:$ptr, GR64:$val))]>;
def ATOMNAND64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                   "#ATOMNAND64 PSEUDO!",
                   [(set GR64:$dst, (atomic_load_nand_64 addr:$ptr, GR64:$val))]>;
def ATOMMIN64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                  "#ATOMMIN64 PSEUDO!",
                  [(set GR64:$dst, (atomic_load_min_64 addr:$ptr, GR64:$val))]>;
def ATOMMAX64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                  "#ATOMMAX64 PSEUDO!",
                  [(set GR64:$dst, (atomic_load_max_64 addr:$ptr, GR64:$val))]>;
def ATOMUMIN64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                   "#ATOMUMIN64 PSEUDO!",
                   [(set GR64:$dst, (atomic_load_umin_64 addr:$ptr, GR64:$val))]>;
def ATOMUMAX64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                   "#ATOMUMAX64 PSEUDO!",
                   [(set GR64:$dst, (atomic_load_umax_64 addr:$ptr, GR64:$val))]>;
}

let Constraints = "$val1 = $dst1, $val2 = $dst2",
    Defs = [EFLAGS, EAX, EBX, ECX, EDX],
    Uses = [EAX, EBX, ECX, EDX],
    mayLoad = 1, mayStore = 1,
    usesCustomInserter = 1 in {
def ATOMAND6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                    (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMAND6432 PSEUDO!", []>;
def ATOMOR6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                   (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                   "#ATOMOR6432 PSEUDO!", []>;
def ATOMXOR6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                    (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMXOR6432 PSEUDO!", []>;
def ATOMNAND6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                     (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                     "#ATOMNAND6432 PSEUDO!", []>;
def ATOMADD6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                    (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMADD6432 PSEUDO!", []>;
def ATOMSUB6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                    (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMSUB6432 PSEUDO!", []>;
def ATOMSWAP6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                     (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                     "#ATOMSWAP6432 PSEUDO!", []>;
}

//===----------------------------------------------------------------------===//
// Normal-Instructions-With-Lock-Prefix Pseudo Instructions
//===----------------------------------------------------------------------===//

// FIXME: Use normal instructions and add lock prefix dynamically.

// Memory barriers

// TODO: Get this to fold the constant into the instruction.
let isCodeGenOnly = 1, Defs = [EFLAGS] in
def OR32mrLocked  : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$zero),
                      "lock\n\t"
                      "or{l}\t{$zero, $dst|$dst, $zero}",
                      []>, Requires<[In32BitMode]>, LOCK;

let hasSideEffects = 1 in
def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
                       "#MEMBARRIER",
                       [(X86MemBarrier)]>;

// RegOpc corresponds to the mr version of the instruction
// ImmOpc corresponds to the mi version of the instruction
// ImmOpc8 corresponds to the mi8 version of the instruction
// ImmMod corresponds to the instruction format of the mi and mi8 versions
multiclass LOCK_ArithBinOp<bits<8> RegOpc, bits<8> ImmOpc, bits<8> ImmOpc8,
                           Format ImmMod, string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1 in {

def #NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 0 },
                  MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
                  !strconcat("lock\n\t", mnemonic, "{b}\t",
                             "{$src2, $dst|$dst, $src2}"),
                  []>, LOCK;
def #NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                    RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                   MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                   !strconcat("lock\n\t", mnemonic, "{w}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   []>, OpSize, LOCK;
def #NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                    RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                   MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                   !strconcat("lock\n\t", mnemonic, "{l}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   []>, LOCK;
def #NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                     RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                    MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    !strconcat("lock\n\t", mnemonic, "{q}\t",
                               "{$src2, $dst|$dst, $src2}"),
                    []>, LOCK;

def #NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                     ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 },
                    ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2),
                    !strconcat("lock\n\t", mnemonic, "{b}\t",
                               "{$src2, $dst|$dst, $src2}"),
                    []>, LOCK;

def #NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                       ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                      ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2),
                      !strconcat("lock\n\t", mnemonic, "{w}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      []>, LOCK;

def #NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                       ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                      ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2),
                      !strconcat("lock\n\t", mnemonic, "{l}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      []>, LOCK;

def #NAME#64mi32 : RIi32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                          ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                         ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),
                         !strconcat("lock\n\t", mnemonic, "{q}\t",
                                    "{$src2, $dst|$dst, $src2}"),
                         []>, LOCK;

def #NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                       ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                      ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2),
                      !strconcat("lock\n\t", mnemonic, "{w}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      []>, LOCK;
def #NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                       ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                      ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2),
                      !strconcat("lock\n\t", mnemonic, "{l}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      []>, LOCK;
def #NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                        ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                       ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2),
                       !strconcat("lock\n\t", mnemonic, "{q}\t",
                                  "{$src2, $dst|$dst, $src2}"),
                       []>, LOCK;

}

}

defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, "add">;
defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, "sub">;
defm LOCK_OR  : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, "or">;
defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, "and">;
defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, "xor">;
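
// Each defm above stamps out the whole family of memory forms: LOCK_ADD,
// for example, yields LOCK_ADD8mr/16mr/32mr/64mr, the 8mi/16mi/32mi/64mi32
// immediate forms, and the 16mi8/32mi8/64mi8 sign-extended-imm8 forms, all
// carrying the lock prefix.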

// Optimized codegen when the non-memory output is not used.
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1 in {

def LOCK_INC8m  : I<0xFE, MRM0m, (outs), (ins i8mem :$dst),
                    "lock\n\t"
                    "inc{b}\t$dst", []>, LOCK;
def LOCK_INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst),
                    "lock\n\t"
                    "inc{w}\t$dst", []>, OpSize, LOCK;
def LOCK_INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst),
                    "lock\n\t"
                    "inc{l}\t$dst", []>, LOCK;
def LOCK_INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst),
                     "lock\n\t"
                     "inc{q}\t$dst", []>, LOCK;

def LOCK_DEC8m  : I<0xFE, MRM1m, (outs), (ins i8mem :$dst),
                    "lock\n\t"
                    "dec{b}\t$dst", []>, LOCK;
def LOCK_DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst),
                    "lock\n\t"
                    "dec{w}\t$dst", []>, OpSize, LOCK;
def LOCK_DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst),
                    "lock\n\t"
                    "dec{l}\t$dst", []>, LOCK;
def LOCK_DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst),
                     "lock\n\t"
                     "dec{q}\t$dst", []>, LOCK;
}

// Atomic compare and swap.
let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX],
    isCodeGenOnly = 1 in
def LCMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$ptr),
                   "lock\n\t"
                   "cmpxchg8b\t$ptr",
                   [(X86cas8 addr:$ptr)]>, TB, LOCK;

let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX],
    isCodeGenOnly = 1 in
def LCMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$ptr),
                     "lock\n\t"
                     "cmpxchg16b\t$ptr",
                     [(X86cas16 addr:$ptr)]>, TB, LOCK,
                     Requires<[HasCmpxchg16b]>;

let Defs = [AL, EFLAGS], Uses = [AL], isCodeGenOnly = 1 in {
def LCMPXCHG8 : I<0xB0, MRMDestMem, (outs), (ins i8mem:$ptr, GR8:$swap),
                  "lock\n\t"
                  "cmpxchg{b}\t{$swap, $ptr|$ptr, $swap}",
                  [(X86cas addr:$ptr, GR8:$swap, 1)]>, TB, LOCK;
}

let Defs = [AX, EFLAGS], Uses = [AX], isCodeGenOnly = 1 in {
def LCMPXCHG16 : I<0xB1, MRMDestMem, (outs), (ins i16mem:$ptr, GR16:$swap),
                   "lock\n\t"
                   "cmpxchg{w}\t{$swap, $ptr|$ptr, $swap}",
                   [(X86cas addr:$ptr, GR16:$swap, 2)]>, TB, OpSize, LOCK;
}

let Defs = [EAX, EFLAGS], Uses = [EAX], isCodeGenOnly = 1 in {
def LCMPXCHG32 : I<0xB1, MRMDestMem, (outs), (ins i32mem:$ptr, GR32:$swap),
                   "lock\n\t"
                   "cmpxchg{l}\t{$swap, $ptr|$ptr, $swap}",
                   [(X86cas addr:$ptr, GR32:$swap, 4)]>, TB, LOCK;
}

let Defs = [RAX, EFLAGS], Uses = [RAX], isCodeGenOnly = 1 in {
def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap),
                    "lock\n\t"
                    "cmpxchg{q}\t{$swap, $ptr|$ptr, $swap}",
                    [(X86cas addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
}

// Atomic exchange and add
let Constraints = "$val = $dst", Defs = [EFLAGS], isCodeGenOnly = 1 in {
def LXADD8  : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins GR8:$val, i8mem:$ptr),
                "lock\n\t"
                "xadd{b}\t{$val, $ptr|$ptr, $val}",
                [(set GR8:$dst, (atomic_load_add_8 addr:$ptr, GR8:$val))]>,
                TB, LOCK;
def LXADD16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins GR16:$val, i16mem:$ptr),
                "lock\n\t"
                "xadd{w}\t{$val, $ptr|$ptr, $val}",
                [(set GR16:$dst, (atomic_load_add_16 addr:$ptr, GR16:$val))]>,
                TB, OpSize, LOCK;
def LXADD32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins GR32:$val, i32mem:$ptr),
                "lock\n\t"
                "xadd{l}\t{$val, $ptr|$ptr, $val}",
                [(set GR32:$dst, (atomic_load_add_32 addr:$ptr, GR32:$val))]>,
                TB, LOCK;
def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins GR64:$val,i64mem:$ptr),
                 "lock\n\t"
                 "xadd{q}\t{$val, $ptr|$ptr, $val}",
                 [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>,
                 TB, LOCK;
}

def ACQUIRE_MOV8rm  : I<0, Pseudo, (outs GR8 :$dst), (ins i8mem :$src),
                        "#ACQUIRE_MOV PSEUDO!",
                        [(set GR8:$dst,  (atomic_load_8  addr:$src))]>;
def ACQUIRE_MOV16rm : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$src),
                        "#ACQUIRE_MOV PSEUDO!",
                        [(set GR16:$dst, (atomic_load_16 addr:$src))]>;
def ACQUIRE_MOV32rm : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$src),
                        "#ACQUIRE_MOV PSEUDO!",
                        [(set GR32:$dst, (atomic_load_32 addr:$src))]>;
def ACQUIRE_MOV64rm : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$src),
                        "#ACQUIRE_MOV PSEUDO!",
                        [(set GR64:$dst, (atomic_load_64 addr:$src))]>;

def RELEASE_MOV8mr  : I<0, Pseudo, (outs), (ins i8mem :$dst, GR8 :$src),
                        "#RELEASE_MOV PSEUDO!",
                        [(atomic_store_8  addr:$dst, GR8 :$src)]>;
def RELEASE_MOV16mr : I<0, Pseudo, (outs), (ins i16mem:$dst, GR16:$src),
                        "#RELEASE_MOV PSEUDO!",
                        [(atomic_store_16 addr:$dst, GR16:$src)]>;
def RELEASE_MOV32mr : I<0, Pseudo, (outs), (ins i32mem:$dst, GR32:$src),
                        "#RELEASE_MOV PSEUDO!",
                        [(atomic_store_32 addr:$dst, GR32:$src)]>;
def RELEASE_MOV64mr : I<0, Pseudo, (outs), (ins i64mem:$dst, GR64:$src),
                        "#RELEASE_MOV PSEUDO!",
                        [(atomic_store_64 addr:$dst, GR64:$src)]>;
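
// Note: on x86, ordinary aligned loads and stores already provide acquire
// and release ordering, so these pseudos mainly carry the atomic semantics
// through codegen; they are ultimately emitted as plain MOVs.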

//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions.
//===----------------------------------------------------------------------===//


// CMOV* - Used to implement the SSE SELECT DAG operation.  Expanded after
// instruction selection into a branch sequence.
let Uses = [EFLAGS], usesCustomInserter = 1 in {
  def CMOV_FR32 : I<0, Pseudo,
                    (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
                    "#CMOV_FR32 PSEUDO!",
                    [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_FR64 : I<0, Pseudo,
                    (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
                    "#CMOV_FR64 PSEUDO!",
                    [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_V4F32 : I<0, Pseudo,
                     (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                     "#CMOV_V4F32 PSEUDO!",
                     [(set VR128:$dst,
                        (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                        EFLAGS)))]>;
  def CMOV_V2F64 : I<0, Pseudo,
                     (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                     "#CMOV_V2F64 PSEUDO!",
                     [(set VR128:$dst,
                        (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                        EFLAGS)))]>;
  def CMOV_V2I64 : I<0, Pseudo,
                     (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                     "#CMOV_V2I64 PSEUDO!",
                     [(set VR128:$dst,
                        (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                        EFLAGS)))]>;
  def CMOV_V8F32 : I<0, Pseudo,
                     (outs VR256:$dst), (ins VR256:$t, VR256:$f, i8imm:$cond),
                     "#CMOV_V8F32 PSEUDO!",
                     [(set VR256:$dst,
                        (v8f32 (X86cmov VR256:$t, VR256:$f, imm:$cond,
                                        EFLAGS)))]>;
  def CMOV_V4F64 : I<0, Pseudo,
                     (outs VR256:$dst), (ins VR256:$t, VR256:$f, i8imm:$cond),
                     "#CMOV_V4F64 PSEUDO!",
                     [(set VR256:$dst,
                        (v4f64 (X86cmov VR256:$t, VR256:$f, imm:$cond,
                                        EFLAGS)))]>;
  def CMOV_V4I64 : I<0, Pseudo,
                     (outs VR256:$dst), (ins VR256:$t, VR256:$f, i8imm:$cond),
                     "#CMOV_V4I64 PSEUDO!",
                     [(set VR256:$dst,
                        (v4i64 (X86cmov VR256:$t, VR256:$f, imm:$cond,
                                        EFLAGS)))]>;
}


//===----------------------------------------------------------------------===//
// DAG Pattern Matching Rules
//===----------------------------------------------------------------------===//

// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable
def : Pat<(i32 (X86Wrapper tconstpool  :$dst)), (MOV32ri tconstpool  :$dst)>;
def : Pat<(i32 (X86Wrapper tjumptable  :$dst)), (MOV32ri tjumptable  :$dst)>;
def : Pat<(i32 (X86Wrapper tglobaltlsaddr:$dst)),(MOV32ri tglobaltlsaddr:$dst)>;
def : Pat<(i32 (X86Wrapper tglobaladdr :$dst)), (MOV32ri tglobaladdr :$dst)>;
def : Pat<(i32 (X86Wrapper texternalsym:$dst)), (MOV32ri texternalsym:$dst)>;
def : Pat<(i32 (X86Wrapper tblockaddress:$dst)), (MOV32ri tblockaddress:$dst)>;

def : Pat<(add GR32:$src1, (X86Wrapper tconstpool:$src2)),
          (ADD32ri GR32:$src1, tconstpool:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tjumptable:$src2)),
          (ADD32ri GR32:$src1, tjumptable:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tglobaladdr :$src2)),
          (ADD32ri GR32:$src1, tglobaladdr:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper texternalsym:$src2)),
          (ADD32ri GR32:$src1, texternalsym:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tblockaddress:$src2)),
          (ADD32ri GR32:$src1, tblockaddress:$src2)>;

def : Pat<(store (i32 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV32mi addr:$dst, tglobaladdr:$src)>;
def : Pat<(store (i32 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV32mi addr:$dst, texternalsym:$src)>;
def : Pat<(store (i32 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV32mi addr:$dst, tblockaddress:$src)>;



// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable when not in small
// code model mode should use 'movabs'.  FIXME: This is really a hack; the
// 'movabs' predicate should handle this sort of thing.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri tconstpool  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri tjumptable  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri tglobaladdr :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri texternalsym:$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri tblockaddress:$dst)>, Requires<[FarData]>;

// In static codegen with small code model, we can get the address of a label
// into a register with 'movl'.  FIXME: This is a hack; the 'imm' predicate of
// MOV64ri64i32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri64i32 tconstpool  :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri64i32 tjumptable  :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri64i32 tglobaladdr :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri64i32 texternalsym:$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri64i32 tblockaddress:$dst)>, Requires<[SmallCode]>;

// In kernel code model, we can get the address of a label
// into a register with 'movq'.  FIXME: This is a hack; the 'imm' predicate of
// MOV64ri32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri32 tconstpool  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri32 tjumptable  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;

// With the small code model and -static mode, it is safe to store global
// addresses directly as immediates.  FIXME: This is really a hack; the 'imm'
// predicate for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
      Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tblockaddress:$src)>,
      Requires<[NearData, IsStatic]>;



// Calls

// tls has some funny stuff here...
// This corresponds to movabs $foo@tpoff, %rax
def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
          (MOV64ri tglobaltlsaddr :$dst)>;
// This corresponds to add $foo@tpoff, %rax
def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
          (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;
// This corresponds to mov foo@tpoff(%rbx), %eax
def : Pat<(load (i64 (X86Wrapper tglobaltlsaddr :$dst))),
          (MOV64rm tglobaltlsaddr :$dst)>;


// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

// tailcall stuff
def : Pat<(X86tcret GR32_TC:$dst, imm:$off),
          (TCRETURNri GR32_TC:$dst, imm:$off)>,
      Requires<[In32BitMode]>;

// FIXME: This is disabled for 32-bit PIC mode because the global base
// register which is part of the address mode may be assigned a
// callee-saved register.
def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi addr:$dst, imm:$off)>,
      Requires<[In32BitMode, IsNotPIC]>;

def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi tglobaladdr:$dst, imm:$off)>,
      Requires<[In32BitMode]>;

def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
          (TCRETURNdi texternalsym:$dst, imm:$off)>,
      Requires<[In32BitMode]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (TCRETURNri64 ptr_rc_tailcall:$dst, imm:$off)>,
      Requires<[In64BitMode]>;

def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi64 addr:$dst, imm:$off)>,
      Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
      Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
          (TCRETURNdi64 texternalsym:$dst, imm:$off)>,
      Requires<[In64BitMode]>;

// Normal calls, with various flavors of addresses.
def : Pat<(X86call (i32 tglobaladdr:$dst)),
          (CALLpcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i32 texternalsym:$dst)),
          (CALLpcrel32 texternalsym:$dst)>;
def : Pat<(X86call (i32 imm:$dst)),
          (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;

// Comparisons.

// TEST R,R is smaller than CMP R,0
def : Pat<(X86cmp GR8:$src1, 0),
          (TEST8rr GR8:$src1, GR8:$src1)>;
def : Pat<(X86cmp GR16:$src1, 0),
          (TEST16rr GR16:$src1, GR16:$src1)>;
def : Pat<(X86cmp GR32:$src1, 0),
          (TEST32rr GR32:$src1, GR32:$src1)>;
def : Pat<(X86cmp GR64:$src1, 0),
          (TEST64rr GR64:$src1, GR64:$src1)>;
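
// For example, "testl %eax, %eax" encodes in two bytes, while even the
// shortest form of "cmpl $0, %eax" needs three; for a comparison against
// zero the resulting flag state is equivalent.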

// Conditional moves with folded loads, with operands swapped and conditions
// inverted.
multiclass CMOVmr<PatLeaf InvertedCond, Instruction Inst16, Instruction Inst32,
                  Instruction Inst64> {
  def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, InvertedCond, EFLAGS),
            (Inst16 GR16:$src2, addr:$src1)>;
  def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, InvertedCond, EFLAGS),
            (Inst32 GR32:$src2, addr:$src1)>;
  def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, InvertedCond, EFLAGS),
            (Inst64 GR64:$src2, addr:$src1)>;
}

defm : CMOVmr<X86_COND_B , CMOVAE16rm, CMOVAE32rm, CMOVAE64rm>;
defm : CMOVmr<X86_COND_AE, CMOVB16rm , CMOVB32rm , CMOVB64rm>;
defm : CMOVmr<X86_COND_E , CMOVNE16rm, CMOVNE32rm, CMOVNE64rm>;
defm : CMOVmr<X86_COND_NE, CMOVE16rm , CMOVE32rm , CMOVE64rm>;
defm : CMOVmr<X86_COND_BE, CMOVA16rm , CMOVA32rm , CMOVA64rm>;
defm : CMOVmr<X86_COND_A , CMOVBE16rm, CMOVBE32rm, CMOVBE64rm>;
defm : CMOVmr<X86_COND_L , CMOVGE16rm, CMOVGE32rm, CMOVGE64rm>;
defm : CMOVmr<X86_COND_GE, CMOVL16rm , CMOVL32rm , CMOVL64rm>;
defm : CMOVmr<X86_COND_LE, CMOVG16rm , CMOVG32rm , CMOVG64rm>;
defm : CMOVmr<X86_COND_G , CMOVLE16rm, CMOVLE32rm, CMOVLE64rm>;
defm : CMOVmr<X86_COND_P , CMOVNP16rm, CMOVNP32rm, CMOVNP64rm>;
defm : CMOVmr<X86_COND_NP, CMOVP16rm , CMOVP32rm , CMOVP64rm>;
defm : CMOVmr<X86_COND_S , CMOVNS16rm, CMOVNS32rm, CMOVNS64rm>;
defm : CMOVmr<X86_COND_NS, CMOVS16rm , CMOVS32rm , CMOVS64rm>;
defm : CMOVmr<X86_COND_O , CMOVNO16rm, CMOVNO32rm, CMOVNO64rm>;
defm : CMOVmr<X86_COND_NO, CMOVO16rm , CMOVO32rm , CMOVO64rm>;

// zextload bool -> zextload byte
def : Pat<(zextloadi8i1  addr:$src), (MOV8rm     addr:$src)>;
def : Pat<(zextloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;

// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.

def : Pat<(extloadi8i1 addr:$src),   (MOV8rm      addr:$src)>;
def : Pat<(extloadi16i1 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i1 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi16i8 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i8 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;

def : Pat<(extloadi64i1 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i8 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
// For other extloads, use subregs, since the high contents of the register
// are defined after an extload.
def : Pat<(extloadi64i32 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src),
                         sub_32bit)>;

// anyext. Define these to do an explicit zero-extend to
// avoid partial-register updates.
def : Pat<(i16 (anyext GR8 :$src)), (EXTRACT_SUBREG
                                     (MOVZX32rr8 GR8 :$src), sub_16bit)>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;

// Except for i16 -> i32, since isel expects i16 ops to be promoted to i32.
def : Pat<(i32 (anyext GR16:$src)),
          (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;

def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8  GR8  :$src)>;
def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16 :$src)>;
def : Pat<(i64 (anyext GR32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;


// Any instruction that defines a 32-bit result implicitly zero-extends into
// the high half of the 64-bit register, with a few exceptions: Truncate may
// be lowered to EXTRACT_SUBREG, CopyFromReg may be copying from a truncate,
// and x86's cmov doesn't write its destination when the condition is false.
// Any other 32-bit operation, though, will zero-extend up to 64 bits.
def def32 : PatLeaf<(i32 GR32:$src), [{
  return N->getOpcode() != ISD::TRUNCATE &&
         N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg &&
         N->getOpcode() != X86ISD::CMOV;
}]>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
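// e.g. for (i64 (zext (add GR32:$a, GR32:$b))), the addl already cleared bits
// 63:32, so no zero-extending mov is needed; SUBREG_TO_REG just relabels the
// 32-bit result as a 64-bit value.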

//===----------------------------------------------------------------------===//
// Pattern match OR as ADD
//===----------------------------------------------------------------------===//

// If safe, we prefer to pattern match OR as ADD at isel time. ADD can be
// 3-addressified into an LEA instruction to avoid copies. However, we also
// want to eventually emit these instructions as an OR at the end of the code
// generator to make the generated code easier to read. To do this, we
// select into "disjoint bits" pseudo ops.

// Treat an 'or' node as an 'add' node if the or'ed bits are known to be zero.
def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());

  unsigned BitWidth = N->getValueType(0).getScalarType().getSizeInBits();
  APInt Mask = APInt::getAllOnesValue(BitWidth);
  APInt KnownZero0, KnownOne0;
  CurDAG->ComputeMaskedBits(N->getOperand(0), Mask, KnownZero0, KnownOne0, 0);
  APInt KnownZero1, KnownOne1;
  CurDAG->ComputeMaskedBits(N->getOperand(1), Mask, KnownZero1, KnownOne1, 0);
  return (~KnownZero0 & ~KnownZero1) == 0;
}]>;
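// Example: in (or (shl x, 4), 7), the left operand has its low four bits
// known zero, so the constant only sets known-zero bits and the or is an add.
// The general case requires that no bit position can be set in both operands,
// so the addition can never produce a carry.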


// (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.
let AddedComplexity = 5 in { // Try this before selecting to OR.

let isConvertibleToThreeAddress = 1,
    Constraints = "$src1 = $dst", Defs = [EFLAGS] in {
let isCommutable = 1 in {
def ADD16rr_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                    "", // orw/addw REG, REG
                    [(set GR16:$dst, (or_is_add GR16:$src1, GR16:$src2))]>;
def ADD32rr_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                    "", // orl/addl REG, REG
                    [(set GR32:$dst, (or_is_add GR32:$src1, GR32:$src2))]>;
def ADD64rr_DB  : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "", // orq/addq REG, REG
                    [(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;
} // isCommutable

// NOTE: These are order-specific; we want the ri8 forms to be listed
// first so that they are slightly preferred to the ri forms.

def ADD16ri8_DB : I<0, Pseudo,
                    (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
                    "", // orw/addw REG, imm8
                    [(set GR16:$dst,(or_is_add GR16:$src1,i16immSExt8:$src2))]>;
def ADD16ri_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
                    "", // orw/addw REG, imm
                    [(set GR16:$dst, (or_is_add GR16:$src1, imm:$src2))]>;

def ADD32ri8_DB : I<0, Pseudo,
                    (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
                    "", // orl/addl REG, imm8
                    [(set GR32:$dst,(or_is_add GR32:$src1,i32immSExt8:$src2))]>;
def ADD32ri_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
                    "", // orl/addl REG, imm
                    [(set GR32:$dst, (or_is_add GR32:$src1, imm:$src2))]>;


def ADD64ri8_DB : I<0, Pseudo,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "", // orq/addq REG, imm8
                    [(set GR64:$dst, (or_is_add GR64:$src1,
                                                i64immSExt8:$src2))]>;
def ADD64ri32_DB : I<0, Pseudo,
                     (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                     "", // orq/addq REG, imm
                     [(set GR64:$dst, (or_is_add GR64:$src1,
                                                 i64immSExt32:$src2))]>;
}
} // AddedComplexity
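// A minimal example of the payoff: for "a | 16" where the low four bits of a
// are known zero, isel picks ADD32ri8_DB. If the pseudo is later
// 3-addressified, the result is "leal 16(%a), %dst" with no extra copy;
// otherwise it is printed back as "orl $16, %a".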


//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//

// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
def : Pat<(add GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
          (SUB16mi8 addr:$dst, -128)>;

def : Pat<(add GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
          (SUB32mi8 addr:$dst, -128)>;

def : Pat<(add GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi8 addr:$dst, -128)>;
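// The win is code size: "addl $128, %eax" needs a 32-bit immediate (5 bytes),
// while the equivalent "subl $-128, %eax" uses the sign-extended 8-bit
// immediate form and fits in 3 bytes.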

// The same trick applies for 32-bit immediate fields in 64-bit
// instructions.
def : Pat<(add GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;

// To avoid needing to materialize an immediate in a register, use a 32-bit and
// with implicit zero-extension instead of a 64-bit and if the immediate has at
// least 32 bits of leading zeros. If in addition the last 32 bits can be
// represented with a sign extension of an 8-bit constant, use that.

def : Pat<(and GR64:$src, i64immZExt32SExt8:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri8
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo8XForm imm:$imm))),
            sub_32bit)>;
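// e.g. for (and GR64:$x, 0x00000000fffffff0): a 64-bit and would sign-extend
// its imm32 (or need the constant materialized in a register), but
// "andl $-16, %eax" computes the same value because the 32-bit operation also
// zeroes bits 63:32.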

def : Pat<(and GR64:$src, i64immZExt32:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;


// r & (2^16-1) ==> movz
def : Pat<(and GR32:$src1, 0xffff),
          (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src1,
                                                             GR32_ABCD)),
                                      sub_8bit))>,
      Requires<[In32BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (EXTRACT_SUBREG (MOVZX32rr8 (EXTRACT_SUBREG
            (i16 (COPY_TO_REGCLASS GR16:$src1, GR16_ABCD)), sub_8bit)),
            sub_16bit)>,
      Requires<[In32BitMode]>;

// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
          (MOVZX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
// r & (2^16-1) ==> movz
def : Pat<(and GR64:$src, 0xffff),
          (MOVZX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit)))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
          (MOVZX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit)))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>,
      Requires<[In64BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (EXTRACT_SUBREG (MOVZX32rr8 (i8
            (EXTRACT_SUBREG GR16:$src1, sub_8bit))), sub_16bit)>,
      Requires<[In64BitMode]>;
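// The COPY_TO_REGCLASS to GR32_ABCD/GR16_ABCD in the 32-bit-mode patterns is
// needed because only %al/%bl/%cl/%dl name an addressable low-byte subreg
// there; in 64-bit mode every GPR has one (via a REX prefix), so the
// 64-bit-mode patterns can use the register classes directly.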


// sext_inreg patterns
def : Pat<(sext_inreg GR32:$src, i16),
          (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit))>,
      Requires<[In32BitMode]>;

def : Pat<(sext_inreg GR16:$src, i8),
          (EXTRACT_SUBREG (i32 (MOVSX32rr8 (EXTRACT_SUBREG
            (i32 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)), sub_8bit))),
            sub_16bit)>,
      Requires<[In32BitMode]>;

def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
def : Pat<(sext_inreg GR64:$src, i16),
          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>,
      Requires<[In64BitMode]>;
def : Pat<(sext_inreg GR16:$src, i8),
          (EXTRACT_SUBREG (MOVSX32rr8
            (EXTRACT_SUBREG GR16:$src, sub_8bit)), sub_16bit)>,
      Requires<[In64BitMode]>;
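// e.g. (sext_inreg GR32:$x, i8) extracts the low byte and sign-extends it
// back over the full register: with $x in %eax, "movsbl %al, %eax".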

// sext, sext_load, zext, zext_load
def : Pat<(i16 (sext GR8:$src)),
          (EXTRACT_SUBREG (MOVSX32rr8 GR8:$src), sub_16bit)>;
def : Pat<(sextloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVSX32rm8 addr:$src), sub_16bit)>;
def : Pat<(i16 (zext GR8:$src)),
          (EXTRACT_SUBREG (MOVZX32rr8 GR8:$src), sub_16bit)>;
def : Pat<(zextloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
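// These deliberately use the 32-bit movsx/movzx and then take the 16-bit
// subreg: writing all 32 bits avoids a partial-register update of the
// destination, and the 32-bit forms also skip the 0x66 operand-size prefix.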

// trunc patterns
def : Pat<(i16 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit)>,
      Requires<[In32BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit)>,
      Requires<[In32BitMode]>;
def : Pat<(i32 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
def : Pat<(i16 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
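// Truncation is free: each pattern is a pure subregister read and emits no
// instructions (at most a register-class copy in 32-bit mode).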

// h-register tricks
def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit_hi)>,
      Requires<[In32BitMode]>;
def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit_hi)>,
      Requires<[In32BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32rr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_16bit)>,
      Requires<[In32BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[In32BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[In32BitMode]>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[In32BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[In32BitMode]>;
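// e.g. "(x >> 8) & 0xff" with x in %eax becomes a single "movzbl %ah, %eax":
// reading the h-register replaces a shift-and-mask pair.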

// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if the
// value is immediately zero-extended or stored, which are somewhat common
// cases. This uses a bunch of code to prevent a register requiring a REX prefix
// from being allocated in the same instruction as the h register, as there's
// currently no way to describe this requirement to the register allocator.

// h-register extract and zero-extend.
def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32_NOREXrr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                                   GR32_ABCD)),
                                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_16bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;

// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                            sub_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
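// The _NOREX opcodes are the enforcement mechanism: an h-register (%ah, %bh,
// %ch, %dh) cannot be encoded in an instruction that carries a REX prefix, so
// these variants constrain their other operands to registers encodable
// without REX and can never be paired with the likes of %sil or %r8b.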


// (shl x, 1) ==> (add x, x)
// Note that if x is undef (immediate or otherwise), we could theoretically
// end up with the two uses of x getting different values, producing a result
// where the least significant bit is not 0. However, the probability of this
// happening is considered low enough that this is officially not a
// "real problem".
def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr  GR8 :$src1, GR8 :$src1)>;
def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
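// The add form is generally at least as good as the shift: it is commutable,
// and on many x86 implementations adds can issue on more execution units than
// shifts can.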

// Helper imms that check if a mask doesn't change significant shift bits.
def immShift32 : ImmLeaf<i8, [{ return CountTrailingOnes_32(Imm) >= 5; }]>;
def immShift64 : ImmLeaf<i8, [{ return CountTrailingOnes_32(Imm) >= 6; }]>;
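// The hardware already masks the shift count in %cl to 5 bits (6 bits for
// 64-bit operands), so an explicit and whose mask preserves at least that
// many low bits is a no-op and can be dropped. This also covers masks wider
// than 31/63, e.g. (and CL, 127).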

// (shl x (and y, 31)) ==> (shl x, y)
def : Pat<(shl GR8:$src1, (and CL, immShift32)),
          (SHL8rCL GR8:$src1)>;
def : Pat<(shl GR16:$src1, (and CL, immShift32)),
          (SHL16rCL GR16:$src1)>;
def : Pat<(shl GR32:$src1, (and CL, immShift32)),
          (SHL32rCL GR32:$src1)>;
def : Pat<(store (shl (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SHL8mCL addr:$dst)>;
def : Pat<(store (shl (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SHL16mCL addr:$dst)>;
def : Pat<(store (shl (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SHL32mCL addr:$dst)>;

def : Pat<(srl GR8:$src1, (and CL, immShift32)),
          (SHR8rCL GR8:$src1)>;
def : Pat<(srl GR16:$src1, (and CL, immShift32)),
          (SHR16rCL GR16:$src1)>;
def : Pat<(srl GR32:$src1, (and CL, immShift32)),
          (SHR32rCL GR32:$src1)>;
def : Pat<(store (srl (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SHR8mCL addr:$dst)>;
def : Pat<(store (srl (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SHR16mCL addr:$dst)>;
def : Pat<(store (srl (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SHR32mCL addr:$dst)>;

def : Pat<(sra GR8:$src1, (and CL, immShift32)),
          (SAR8rCL GR8:$src1)>;
def : Pat<(sra GR16:$src1, (and CL, immShift32)),
          (SAR16rCL GR16:$src1)>;
def : Pat<(sra GR32:$src1, (and CL, immShift32)),
          (SAR32rCL GR32:$src1)>;
def : Pat<(store (sra (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SAR8mCL addr:$dst)>;
def : Pat<(store (sra (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SAR16mCL addr:$dst)>;
def : Pat<(store (sra (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
          (SAR32mCL addr:$dst)>;

// (shl x (and y, 63)) ==> (shl x, y)
def : Pat<(shl GR64:$src1, (and CL, immShift64)),
          (SHL64rCL GR64:$src1)>;
def : Pat<(store (shl (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
          (SHL64mCL addr:$dst)>;

def : Pat<(srl GR64:$src1, (and CL, immShift64)),
          (SHR64rCL GR64:$src1)>;
def : Pat<(store (srl (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
          (SHR64mCL addr:$dst)>;

def : Pat<(sra GR64:$src1, (and CL, immShift64)),
          (SAR64rCL GR64:$src1)>;
def : Pat<(store (sra (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
          (SAR64mCL addr:$dst)>;

// (anyext (setcc_carry)) -> (setcc_carry)
def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i32 (anyext (i16 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
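// SETB_C*r expands to "sbb reg, reg", which fills every bit of its result
// with the carry flag; the wider form of the pseudo therefore already is the
// extended value, so no separate extension is emitted.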


//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//

// add reg, reg
def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>;
def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>;

// add reg, mem
def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
          (ADD8rm GR8:$src1, addr:$src2)>;
def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
          (ADD16rm GR16:$src1, addr:$src2)>;
def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
          (ADD32rm GR32:$src1, addr:$src2)>;

// add reg, imm
def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;
def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, i16immSExt8:$src2),
          (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(add GR32:$src1, i32immSExt8:$src2),
          (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;

// sub reg, reg
def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>;
def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>;

// sub reg, mem
def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
          (SUB8rm GR8:$src1, addr:$src2)>;
def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
          (SUB16rm GR16:$src1, addr:$src2)>;
def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
          (SUB32rm GR32:$src1, addr:$src2)>;

// sub reg, imm
def : Pat<(sub GR8:$src1, imm:$src2),
          (SUB8ri GR8:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, imm:$src2),
          (SUB16ri GR16:$src1, imm:$src2)>;
def : Pat<(sub GR32:$src1, imm:$src2),
          (SUB32ri GR32:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, i16immSExt8:$src2),
          (SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(sub GR32:$src1, i32immSExt8:$src2),
          (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;

// mul reg, reg
def : Pat<(mul GR16:$src1, GR16:$src2),
          (IMUL16rr GR16:$src1, GR16:$src2)>;
def : Pat<(mul GR32:$src1, GR32:$src2),
          (IMUL32rr GR32:$src1, GR32:$src2)>;

// mul reg, mem
def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
          (IMUL16rm GR16:$src1, addr:$src2)>;
def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
          (IMUL32rm GR32:$src1, addr:$src2)>;

// mul reg, imm
def : Pat<(mul GR16:$src1, imm:$src2),
          (IMUL16rri GR16:$src1, imm:$src2)>;
def : Pat<(mul GR32:$src1, imm:$src2),
          (IMUL32rri GR32:$src1, imm:$src2)>;
def : Pat<(mul GR16:$src1, i16immSExt8:$src2),
          (IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(mul GR32:$src1, i32immSExt8:$src2),
          (IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>;

// reg = mul mem, imm
def : Pat<(mul (loadi16 addr:$src1), imm:$src2),
          (IMUL16rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
          (IMUL32rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi16 addr:$src1), i16immSExt8:$src2),
          (IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), i32immSExt8:$src2),
          (IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>;
// Patterns for nodes that do not produce flags, for instructions that do.

// addition
def : Pat<(add GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;

// subtraction
def : Pat<(sub GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Multiply
def : Pat<(mul GR64:$src1, GR64:$src2),
          (IMUL64rr GR64:$src1, GR64:$src2)>;
def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
          (IMUL64rm GR64:$src1, addr:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt8:$src2),
          (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
          (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;

// Increment reg.
def : Pat<(add GR8 :$src, 1), (INC8r GR8 :$src)>;
def : Pat<(add GR16:$src, 1), (INC16r GR16:$src)>, Requires<[In32BitMode]>;
def : Pat<(add GR16:$src, 1), (INC64_16r GR16:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR32:$src, 1), (INC32r GR32:$src)>, Requires<[In32BitMode]>;
def : Pat<(add GR32:$src, 1), (INC64_32r GR32:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR64:$src, 1), (INC64r GR64:$src)>;

// Decrement reg.
def : Pat<(add GR8 :$src, -1), (DEC8r GR8 :$src)>;
def : Pat<(add GR16:$src, -1), (DEC16r GR16:$src)>, Requires<[In32BitMode]>;
def : Pat<(add GR16:$src, -1), (DEC64_16r GR16:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR32:$src, -1), (DEC32r GR32:$src)>, Requires<[In32BitMode]>;
def : Pat<(add GR32:$src, -1), (DEC64_32r GR32:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;
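// The INC64_*/DEC64_* forms exist because the short one-byte inc/dec opcodes
// (0x40-0x4F) were repurposed as REX prefixes in 64-bit mode; there, inc/dec
// must use the longer ModRM encoding.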

// or reg/reg.
def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(or GR16:$src1, GR16:$src2), (OR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(or GR32:$src1, GR32:$src2), (OR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(or GR64:$src1, GR64:$src2), (OR64rr GR64:$src1, GR64:$src2)>;

// or reg/mem
def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
          (OR8rm GR8:$src1, addr:$src2)>;
def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
          (OR16rm GR16:$src1, addr:$src2)>;
def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
          (OR32rm GR32:$src1, addr:$src2)>;
def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
          (OR64rm GR64:$src1, addr:$src2)>;

// or reg/imm
def : Pat<(or GR8:$src1 , imm:$src2), (OR8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>;
def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, i16immSExt8:$src2),
          (OR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(or GR32:$src1, i32immSExt8:$src2),
          (OR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt8:$src2),
          (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt32:$src2),
          (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// xor reg/reg
def : Pat<(xor GR8 :$src1, GR8 :$src2), (XOR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(xor GR16:$src1, GR16:$src2), (XOR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(xor GR32:$src1, GR32:$src2), (XOR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(xor GR64:$src1, GR64:$src2), (XOR64rr GR64:$src1, GR64:$src2)>;

// xor reg/mem
def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
          (XOR8rm GR8:$src1, addr:$src2)>;
def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),
          (XOR16rm GR16:$src1, addr:$src2)>;
def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),
          (XOR32rm GR32:$src1, addr:$src2)>;
def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
          (XOR64rm GR64:$src1, addr:$src2)>;

// xor reg/imm
def : Pat<(xor GR8:$src1, imm:$src2),
          (XOR8ri GR8:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, imm:$src2),
          (XOR16ri GR16:$src1, imm:$src2)>;
def : Pat<(xor GR32:$src1, imm:$src2),
          (XOR32ri GR32:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, i16immSExt8:$src2),
          (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(xor GR32:$src1, i32immSExt8:$src2),
          (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// and reg/reg
def : Pat<(and GR8 :$src1, GR8 :$src2), (AND8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(and GR16:$src1, GR16:$src2), (AND16rr GR16:$src1, GR16:$src2)>;
def : Pat<(and GR32:$src1, GR32:$src2), (AND32rr GR32:$src1, GR32:$src2)>;
def : Pat<(and GR64:$src1, GR64:$src2), (AND64rr GR64:$src1, GR64:$src2)>;

// and reg/mem
def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),
          (AND8rm GR8:$src1, addr:$src2)>;
def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),
          (AND16rm GR16:$src1, addr:$src2)>;
def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),
          (AND32rm GR32:$src1, addr:$src2)>;
def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
          (AND64rm GR64:$src1, addr:$src2)>;

// and reg/imm
def : Pat<(and GR8:$src1, imm:$src2),
          (AND8ri GR8:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, imm:$src2),
          (AND16ri GR16:$src1, imm:$src2)>;
def : Pat<(and GR32:$src1, imm:$src2),
          (AND32ri GR32:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, i16immSExt8:$src2),
          (AND16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(and GR32:$src1, i32immSExt8:$src2),
          (AND32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt8:$src2),
          (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt32:$src2),
          (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Bit scan instruction patterns to match explicit zero-undef behavior.
def : Pat<(cttz_zero_undef GR16:$src), (BSF16rr GR16:$src)>;
def : Pat<(cttz_zero_undef GR32:$src), (BSF32rr GR32:$src)>;
def : Pat<(cttz_zero_undef GR64:$src), (BSF64rr GR64:$src)>;
def : Pat<(cttz_zero_undef (loadi16 addr:$src)), (BSF16rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi32 addr:$src)), (BSF32rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi64 addr:$src)), (BSF64rm addr:$src)>;
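// bsf leaves its destination undefined when the source is zero, which is
// exactly the contract of cttz_zero_undef, so no zero check or cmov fixup is
// needed here.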