//===- X86InstrCompiler.td - Compiler Pseudos and Patterns -*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the various pseudo instructions used by the compiler,
// as well as Pat patterns used during instruction selection.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Pattern Matching Support

def GetLo32XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 32 bits.
  return getI32Imm((unsigned)N->getZExtValue());
}]>;

def GetLo8XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 8 bits.
  return getI8Imm((uint8_t)N->getZExtValue());
}]>;
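
// For example, applied to the immediate 0x1234567890ABCDEF, GetLo32XForm
// yields 0x90ABCDEF and GetLo8XForm yields 0xEF (illustrative values only).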


//===----------------------------------------------------------------------===//
// Random Pseudo Instructions.

// PIC base construction.  This expands to code that looks like this:
//     call  $next_inst
//     popl  %destreg
let neverHasSideEffects = 1, isNotDuplicable = 1, Uses = [ESP] in
  def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
                      "", []>;


// ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [ESP, EFLAGS], Uses = [ESP] in {
def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs), (ins i32imm:$amt),
                           "#ADJCALLSTACKDOWN",
                           [(X86callseq_start timm:$amt)]>,
                          Requires<[In32BitMode]>;
def ADJCALLSTACKUP32   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                          Requires<[In32BitMode]>;
}
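
// For example, a call with 12 bytes of outgoing argument space is bracketed
// roughly as follows once frame lowering expands these pseudos:
//   ADJCALLSTACKDOWN32 12     ->   subl $12, %esp
//   ...the call itself...
//   ADJCALLSTACKUP32 12, 0    ->   addl $12, %esp
// (illustrative only; the exact expansion is chosen by the target's
// eliminateCallFramePseudoInstr hook, and adjustments may be folded away).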

// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [RSP, EFLAGS], Uses = [RSP] in {
def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt),
                           "#ADJCALLSTACKDOWN",
                           [(X86callseq_start timm:$amt)]>,
                          Requires<[In64BitMode]>;
def ADJCALLSTACKUP64   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                          Requires<[In64BitMode]>;
}



// x86-64 va_start lowering magic.
let usesCustomInserter = 1 in {
def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
                              (outs),
                              (ins GR8:$al,
                                   i64imm:$regsavefi, i64imm:$offset,
                                   variable_ops),
                              "#VASTART_SAVE_XMM_REGS $al, $regsavefi, $offset",
                              [(X86vastart_save_xmm_regs GR8:$al,
                                                         imm:$regsavefi,
                                                         imm:$offset)]>;

// The VAARG_64 pseudo-instruction takes the address of the va_list,
// and places the address of the next argument into a register.
let Defs = [EFLAGS] in
def VAARG_64 : I<0, Pseudo,
                 (outs GR64:$dst),
                 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
                 "#VAARG_64 $dst, $ap, $size, $mode, $align",
                 [(set GR64:$dst,
                    (X86vaarg64 addr:$ap, imm:$size, imm:$mode, imm:$align)),
                  (implicit EFLAGS)]>;

// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
// targets.  These calls are needed to probe the stack when allocating more
// than 4K bytes in one go.  Touching the stack at 4K increments is necessary
// to ensure that the guard pages used by the OS virtual memory manager are
// allocated in the correct sequence.
// The main point of having a separate instruction is the extra unmodelled
// effects it has compared to an ordinary call, such as the change to the
// stack pointer.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
  def WIN_ALLOCA : I<0, Pseudo, (outs), (ins),
                     "# dynamic stack allocation",
                     [(X86WinAlloca)]>;
}
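
// For example, a function allocating 12K in one go cannot simply do
// "subl $12288, %esp"; it has to touch each 4K page in order so the guard
// page mechanism can grow the stack.  A rough sketch of the expansion:
//   movl $12288, %eax
//   calll __chkstk        ; probes each page and adjusts %esp
// with the allocation size passed in %eax, matching the Defs/Uses above.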



//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN   : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR32:$addr)]>;

}

let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR64:$addr)]>;

}

//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// Alias instructions that map movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
// FIXME: Set encoding to pseudo.
let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
    isCodeGenOnly = 1 in {
def MOV8r0   : I<0x30, MRMInitReg, (outs GR8 :$dst), (ins), "",
                 [(set GR8:$dst, 0)]>;

// We want to rewrite MOV16r0 in terms of MOV32r0, because it's a smaller
// encoding and sometimes avoids a partial-register update, but doing so
// at isel time interferes with rematerialization in the current register
// allocator. For now, this is rewritten when the instruction is lowered
// to an MCInst.
def MOV16r0   : I<0x31, MRMInitReg, (outs GR16:$dst), (ins),
                  "",
                  [(set GR16:$dst, 0)]>, OpSize;

// FIXME: Set encoding to pseudo.
def MOV32r0  : I<0x31, MRMInitReg, (outs GR32:$dst), (ins), "",
                 [(set GR32:$dst, 0)]>;
}

// We want to rewrite MOV64r0 in terms of MOV32r0, because it's sometimes a
// smaller encoding, but doing so at isel time interferes with rematerialization
// in the current register allocator. For now, this is rewritten when the
// instruction is lowered to an MCInst.
// FIXME: AddedComplexity gives this a higher priority than MOV64ri32. Remove
// when we have a better way to specify isel priority.
let Defs = [EFLAGS], isCodeGenOnly = 1,
    AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOV64r0   : I<0x31, MRMInitReg, (outs GR64:$dst), (ins), "",
                  [(set GR64:$dst, 0)]>;
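
// For example, MOV64r0 is eventually emitted as "xorl %eax, %eax": writing a
// 32-bit register implicitly zeroes the upper 32 bits, so the two-byte 32-bit
// xor clears the full 64-bit register without needing a REX prefix.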

// Materialize an i64 constant whose top 32 bits are zero. This could
// theoretically use MOV32ri with a SUBREG_TO_REG to represent the
// zero-extension, but that would make it more difficult to rematerialize.
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1,
    isCodeGenOnly = 1 in
def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
                        "", [(set GR64:$dst, i64immZExt32:$src)]>;

// Use sbb to materialize carry bit.
let Uses = [EFLAGS], Defs = [EFLAGS], isCodeGenOnly = 1 in {
// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
// However, Pat<> can't replicate the destination reg into the inputs of the
// result.
// FIXME: Change these to have encoding Pseudo when X86MCCodeEmitter replaces
// X86CodeEmitter.
def SETB_C8r : I<0x18, MRMInitReg, (outs GR8:$dst), (ins), "",
                 [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C16r : I<0x19, MRMInitReg, (outs GR16:$dst), (ins), "",
                  [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>,
                OpSize;
def SETB_C32r : I<0x19, MRMInitReg, (outs GR32:$dst), (ins), "",
                  [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C64r : RI<0x19, MRMInitReg, (outs GR64:$dst), (ins), "",
                   [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
} // isCodeGenOnly


def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;

def : Pat<(i16 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i64 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;

// We canonicalize 'setb' to "(and (sbb reg,reg), 1)" in the hope that the and
// will be eliminated and that the sbb can be extended up to a wider type.  When
// this happens, it is great.  However, if we are left with an 8-bit sbb and an
// and, we might as well just match it as a setb.
def : Pat<(and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1),
          (SETBr)>;
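
// Illustrative lowering of the materialized carry: after a compare that
// sets CF,
//   sbbl %eax, %eax       ; %eax = CF ? -1 : 0
//   andl $1, %eax         ; %eax = CF ? 1 : 0
// whereas the 8-bit fallback matched above is just "setb %al".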

// (add OP, SETB) -> (adc OP, 0)
def : Pat<(add (and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR8:$op),
          (ADC8ri GR8:$op, 0)>;
def : Pat<(add (and (i32 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR32:$op),
          (ADC32ri8 GR32:$op, 0)>;
def : Pat<(add (and (i64 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR64:$op),
          (ADC64ri8 GR64:$op, 0)>;

// (sub OP, SETB) -> (sbb OP, 0)
def : Pat<(sub GR8:$op, (and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB8ri GR8:$op, 0)>;
def : Pat<(sub GR32:$op, (and (i32 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB32ri8 GR32:$op, 0)>;
def : Pat<(sub GR64:$op, (and (i64 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB64ri8 GR64:$op, 0)>;

// (sub OP, SETCC_CARRY) -> (adc OP, 0)
def : Pat<(sub GR8:$op, (i8 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC8ri GR8:$op, 0)>;
def : Pat<(sub GR32:$op, (i32 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC32ri8 GR32:$op, 0)>;
def : Pat<(sub GR64:$op, (i64 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC64ri8 GR64:$op, 0)>;
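
// The last group works because SETCC_CARRY materializes as "sbb reg, reg",
// i.e. 0 or -1 depending on CF; subtracting -1 adds the carry back:
//   x - (CF ? -1 : 0)  ==  x + (CF ? 1 : 0)  ==  adc x, 0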

//===----------------------------------------------------------------------===//
// String Pseudo Instructions
//
let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
def REP_MOVSB : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
                  [(X86rep_movs i8)]>, REP;
def REP_MOVSW : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
                  [(X86rep_movs i16)]>, REP, OpSize;
def REP_MOVSD : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
                  [(X86rep_movs i32)]>, REP;
}

let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in
def REP_MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
                   [(X86rep_movs i64)]>, REP;


// FIXME: Should use "(X86rep_stos AL)" as the pattern.
let Defs = [ECX,EDI], Uses = [AL,ECX,EDI], isCodeGenOnly = 1 in
def REP_STOSB : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
                  [(X86rep_stos i8)]>, REP;
let Defs = [ECX,EDI], Uses = [AX,ECX,EDI], isCodeGenOnly = 1 in
def REP_STOSW : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
                  [(X86rep_stos i16)]>, REP, OpSize;
let Defs = [ECX,EDI], Uses = [EAX,ECX,EDI], isCodeGenOnly = 1 in
def REP_STOSD : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
                  [(X86rep_stos i32)]>, REP;

let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI], isCodeGenOnly = 1 in
def REP_STOSQ : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
                   [(X86rep_stos i64)]>, REP;
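
// Illustrative register convention for the pseudos above: a small memset
// lowered through X86rep_stos ends up as, roughly,
//   movl  $count, %ecx    ; element count
//   movl  $dest,  %edi    ; destination
//   movb  $value, %al     ; fill byte
//   rep stosb
// which is exactly the Defs/Uses the definitions declare.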


//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//

// ELF TLS Support
// All calls clobber the non-callee saved registers. ESP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [ESP] in
def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLS_addr32",
                   [(X86tlsaddr tls32addr:$sym)]>,
                 Requires<[In32BitMode]>;

// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
            FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [RSP] in
def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLS_addr64",
                   [(X86tlsaddr tls64addr:$sym)]>,
                 Requires<[In64BitMode]>;

// Darwin TLS Support
// For i386, the address of the thunk is passed on the stack; on return, the
// address of the variable is in %eax.  %ecx is trashed during the function
// call.  All other registers are preserved.
let Defs = [EAX, ECX, EFLAGS],
    Uses = [ESP],
    usesCustomInserter = 1 in
def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLSCall_32",
                   [(X86TLSCall addr:$sym)]>,
                 Requires<[In32BitMode]>;

// For x86_64, the address of the thunk is passed in %rdi; on return, the
// address of the variable is in %rax.  All other registers are preserved.
let Defs = [RAX, EFLAGS],
    Uses = [RSP, RDI],
    usesCustomInserter = 1 in
def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLSCall_64",
                   [(X86TLSCall addr:$sym)]>,
                 Requires<[In64BitMode]>;


//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions

let Constraints = "$src1 = $dst" in {

// Conditional moves
let Uses = [EFLAGS] in {

// X86 doesn't have 8-bit conditional moves. Use a customInserter to
// emit control flow. An alternative to this is to mark i8 SELECT as Promote,
// but that requires promoting the operands and can induce additional
// i8 register pressure. Note that CMOV_GR8 is conservatively considered to
// clobber EFLAGS, because if one of the operands is zero, the expansion
// could involve an xor.
let usesCustomInserter = 1, Constraints = "", Defs = [EFLAGS] in {
def CMOV_GR8 : I<0, Pseudo,
                 (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
                 "#CMOV_GR8 PSEUDO!",
                 [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
                                          imm:$cond, EFLAGS))]>;

let Predicates = [NoCMov] in {
def CMOV_GR32 : I<0, Pseudo,
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cond),
                  "#CMOV_GR32* PSEUDO!",
                  [(set GR32:$dst,
                     (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>;
def CMOV_GR16 : I<0, Pseudo,
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cond),
                  "#CMOV_GR16* PSEUDO!",
                  [(set GR16:$dst,
                     (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;
def CMOV_RFP32 : I<0, Pseudo,
                   (outs RFP32:$dst),
                   (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
                   "#CMOV_RFP32 PSEUDO!",
                   [(set RFP32:$dst,
                      (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
                               EFLAGS))]>;
def CMOV_RFP64 : I<0, Pseudo,
                   (outs RFP64:$dst),
                   (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
                   "#CMOV_RFP64 PSEUDO!",
                   [(set RFP64:$dst,
                      (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
                               EFLAGS))]>;
def CMOV_RFP80 : I<0, Pseudo,
                   (outs RFP80:$dst),
                   (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),
                   "#CMOV_RFP80 PSEUDO!",
                   [(set RFP80:$dst,
                      (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,
                               EFLAGS))]>;
} // Predicates = [NoCMov]
} // usesCustomInserter = 1, Constraints = "", Defs = [EFLAGS]
} // Uses = [EFLAGS]

} // Constraints = "$src1 = $dst"


//===----------------------------------------------------------------------===//
// Atomic Pseudo Instructions
//===----------------------------------------------------------------------===//

// Atomic exchange, and, or, xor
let Constraints = "$val = $dst", Defs = [EFLAGS],
    usesCustomInserter = 1 in {

def ATOMAND8 : I<0, Pseudo, (outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
               "#ATOMAND8 PSEUDO!",
               [(set GR8:$dst, (atomic_load_and_8 addr:$ptr, GR8:$val))]>;
def ATOMOR8 : I<0, Pseudo, (outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
              "#ATOMOR8 PSEUDO!",
              [(set GR8:$dst, (atomic_load_or_8 addr:$ptr, GR8:$val))]>;
def ATOMXOR8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
               "#ATOMXOR8 PSEUDO!",
               [(set GR8:$dst, (atomic_load_xor_8 addr:$ptr, GR8:$val))]>;
def ATOMNAND8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val),
                "#ATOMNAND8 PSEUDO!",
                [(set GR8:$dst, (atomic_load_nand_8 addr:$ptr, GR8:$val))]>;

def ATOMAND16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
                "#ATOMAND16 PSEUDO!",
                [(set GR16:$dst, (atomic_load_and_16 addr:$ptr, GR16:$val))]>;
def ATOMOR16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
               "#ATOMOR16 PSEUDO!",
               [(set GR16:$dst, (atomic_load_or_16 addr:$ptr, GR16:$val))]>;
def ATOMXOR16 : I<0, Pseudo,(outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
                "#ATOMXOR16 PSEUDO!",
                [(set GR16:$dst, (atomic_load_xor_16 addr:$ptr, GR16:$val))]>;
def ATOMNAND16 : I<0, Pseudo,(outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
                 "#ATOMNAND16 PSEUDO!",
                 [(set GR16:$dst, (atomic_load_nand_16 addr:$ptr, GR16:$val))]>;
def ATOMMIN16 : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
                "#ATOMMIN16 PSEUDO!",
                [(set GR16:$dst, (atomic_load_min_16 addr:$ptr, GR16:$val))]>;
def ATOMMAX16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
                "#ATOMMAX16 PSEUDO!",
                [(set GR16:$dst, (atomic_load_max_16 addr:$ptr, GR16:$val))]>;
def ATOMUMIN16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
                 "#ATOMUMIN16 PSEUDO!",
                 [(set GR16:$dst, (atomic_load_umin_16 addr:$ptr, GR16:$val))]>;
def ATOMUMAX16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val),
                 "#ATOMUMAX16 PSEUDO!",
                 [(set GR16:$dst, (atomic_load_umax_16 addr:$ptr, GR16:$val))]>;


def ATOMAND32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
                "#ATOMAND32 PSEUDO!",
                [(set GR32:$dst, (atomic_load_and_32 addr:$ptr, GR32:$val))]>;
def ATOMOR32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
               "#ATOMOR32 PSEUDO!",
               [(set GR32:$dst, (atomic_load_or_32 addr:$ptr, GR32:$val))]>;
def ATOMXOR32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
                "#ATOMXOR32 PSEUDO!",
                [(set GR32:$dst, (atomic_load_xor_32 addr:$ptr, GR32:$val))]>;
def ATOMNAND32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
                 "#ATOMNAND32 PSEUDO!",
                 [(set GR32:$dst, (atomic_load_nand_32 addr:$ptr, GR32:$val))]>;
def ATOMMIN32 : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
                "#ATOMMIN32 PSEUDO!",
                [(set GR32:$dst, (atomic_load_min_32 addr:$ptr, GR32:$val))]>;
def ATOMMAX32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
                "#ATOMMAX32 PSEUDO!",
                [(set GR32:$dst, (atomic_load_max_32 addr:$ptr, GR32:$val))]>;
def ATOMUMIN32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
                 "#ATOMUMIN32 PSEUDO!",
                 [(set GR32:$dst, (atomic_load_umin_32 addr:$ptr, GR32:$val))]>;
def ATOMUMAX32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
                 "#ATOMUMAX32 PSEUDO!",
                 [(set GR32:$dst, (atomic_load_umax_32 addr:$ptr, GR32:$val))]>;



def ATOMAND64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
                "#ATOMAND64 PSEUDO!",
                [(set GR64:$dst, (atomic_load_and_64 addr:$ptr, GR64:$val))]>;
def ATOMOR64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
               "#ATOMOR64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_or_64 addr:$ptr, GR64:$val))]>;
def ATOMXOR64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
                "#ATOMXOR64 PSEUDO!",
                [(set GR64:$dst, (atomic_load_xor_64 addr:$ptr, GR64:$val))]>;
def ATOMNAND64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
                 "#ATOMNAND64 PSEUDO!",
                 [(set GR64:$dst, (atomic_load_nand_64 addr:$ptr, GR64:$val))]>;
def ATOMMIN64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                "#ATOMMIN64 PSEUDO!",
                [(set GR64:$dst, (atomic_load_min_64 addr:$ptr, GR64:$val))]>;
def ATOMMAX64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
                "#ATOMMAX64 PSEUDO!",
                [(set GR64:$dst, (atomic_load_max_64 addr:$ptr, GR64:$val))]>;
def ATOMUMIN64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
                 "#ATOMUMIN64 PSEUDO!",
                 [(set GR64:$dst, (atomic_load_umin_64 addr:$ptr, GR64:$val))]>;
def ATOMUMAX64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val),
                 "#ATOMUMAX64 PSEUDO!",
                 [(set GR64:$dst, (atomic_load_umax_64 addr:$ptr, GR64:$val))]>;
}
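
// These pseudos carry no encoding; the custom inserter expands each of them,
// after instruction selection, into a load / operate / "lock cmpxchg" retry
// loop.  A rough sketch for ATOMAND32:
//   movl (%ptr), %eax
// retry:
//   movl %eax, %tmp
//   andl %val, %tmp
//   lock cmpxchgl %tmp, (%ptr)   ; succeeds only if memory still equals %eax
//   jne  retry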

let Constraints = "$val1 = $dst1, $val2 = $dst2",
    Defs = [EFLAGS, EAX, EBX, ECX, EDX],
    Uses = [EAX, EBX, ECX, EDX],
    mayLoad = 1, mayStore = 1,
    usesCustomInserter = 1 in {
def ATOMAND6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                    (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMAND6432 PSEUDO!", []>;
def ATOMOR6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                   (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                   "#ATOMOR6432 PSEUDO!", []>;
def ATOMXOR6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                    (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMXOR6432 PSEUDO!", []>;
def ATOMNAND6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                     (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                     "#ATOMNAND6432 PSEUDO!", []>;
def ATOMADD6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                    (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMADD6432 PSEUDO!", []>;
def ATOMSUB6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                    (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMSUB6432 PSEUDO!", []>;
def ATOMSWAP6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                     (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                     "#ATOMSWAP6432 PSEUDO!", []>;
}

//===----------------------------------------------------------------------===//
// Normal-Instructions-With-Lock-Prefix Pseudo Instructions
//===----------------------------------------------------------------------===//

// FIXME: Use normal instructions and add lock prefix dynamically.

// Memory barriers

// TODO: Get this to fold the constant into the instruction.
let isCodeGenOnly = 1 in
def OR32mrLocked  : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$zero),
                      "lock\n\t"
                      "or{l}\t{$zero, $dst|$dst, $zero}",
                      []>, Requires<[In32BitMode]>, LOCK;

let hasSideEffects = 1 in
def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
                       "#MEMBARRIER",
                       [(X86MemBarrier)]>, Requires<[HasSSE2]>;

// TODO: Get this to fold the constant into the instruction.
let hasSideEffects = 1, Defs = [ESP], isCodeGenOnly = 1 in
def Int_MemBarrierNoSSE64  : RI<0x09, MRM1r, (outs), (ins GR64:$zero),
                                "lock\n\t"
                                "or{q}\t{$zero, (%rsp)|(%rsp), $zero}",
                                [(X86MemBarrierNoSSE GR64:$zero)]>,
                                Requires<[In64BitMode]>, LOCK;


// Optimized codegen when the non-memory output is not used.
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1 in {
def LOCK_ADD8mr  : I<0x00, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
                     "lock\n\t"
                     "add{b}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD16mr : I<0x01, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                     "lock\n\t"
                     "add{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
def LOCK_ADD32mr : I<0x01, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                     "lock\n\t"
                     "add{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                      "lock\n\t"
                      "add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;

def LOCK_ADD8mi   : Ii8<0x80, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src2),
                        "lock\n\t"
                        "add{b}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD16mi  : Ii16<0x81, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src2),
                         "lock\n\t"
                         "add{w}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD32mi  : Ii32<0x81, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src2),
                         "lock\n\t"
                         "add{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD64mi32 : RIi32<0x81, MRM0m, (outs),
                           (ins i64mem:$dst, i64i32imm :$src2),
                           "lock\n\t"
                           "add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;

def LOCK_ADD16mi8 : Ii8<0x83, MRM0m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
                        "lock\n\t"
                        "add{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
def LOCK_ADD32mi8 : Ii8<0x83, MRM0m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
                        "lock\n\t"
                        "add{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD64mi8 : RIi8<0x83, MRM0m, (outs),
                         (ins i64mem:$dst, i64i8imm :$src2),
                         "lock\n\t"
                         "add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;

def LOCK_SUB8mr   : I<0x28, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src2),
                      "lock\n\t"
                      "sub{b}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB16mr  : I<0x29, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                      "lock\n\t"
                      "sub{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
def LOCK_SUB32mr  : I<0x29, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                      "lock\n\t"
                      "sub{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB64mr  : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                       "lock\n\t"
                       "sub{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;


def LOCK_SUB8mi   : Ii8<0x80, MRM5m, (outs), (ins i8mem :$dst, i8imm:$src2),
                        "lock\n\t"
                        "sub{b}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB16mi  : Ii16<0x81, MRM5m, (outs), (ins i16mem:$dst, i16imm:$src2),
                         "lock\n\t"
                         "sub{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
def LOCK_SUB32mi  : Ii32<0x81, MRM5m, (outs), (ins i32mem:$dst, i32imm:$src2),
                         "lock\n\t"
                         "sub{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB64mi32 : RIi32<0x81, MRM5m, (outs),
                           (ins i64mem:$dst, i64i32imm:$src2),
                           "lock\n\t"
                           "sub{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;


def LOCK_SUB16mi8 : Ii8<0x83, MRM5m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
                        "lock\n\t"
                        "sub{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
def LOCK_SUB32mi8 : Ii8<0x83, MRM5m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
                        "lock\n\t"
                        "sub{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB64mi8 : RIi8<0x83, MRM5m, (outs),
                         (ins i64mem:$dst, i64i8imm :$src2),
                         "lock\n\t"
                         "sub{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;

def LOCK_INC8m  : I<0xFE, MRM0m, (outs), (ins i8mem :$dst),
                    "lock\n\t"
                    "inc{b}\t$dst", []>, LOCK;
def LOCK_INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst),
                    "lock\n\t"
                    "inc{w}\t$dst", []>, OpSize, LOCK;
def LOCK_INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst),
                    "lock\n\t"
                    "inc{l}\t$dst", []>, LOCK;
def LOCK_INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst),
                     "lock\n\t"
                     "inc{q}\t$dst", []>, LOCK;

def LOCK_DEC8m  : I<0xFE, MRM1m, (outs), (ins i8mem :$dst),
                    "lock\n\t"
                    "dec{b}\t$dst", []>, LOCK;
def LOCK_DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst),
                    "lock\n\t"
                    "dec{w}\t$dst", []>, OpSize, LOCK;
def LOCK_DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst),
                    "lock\n\t"
                    "dec{l}\t$dst", []>, LOCK;
def LOCK_DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst),
                     "lock\n\t"
                     "dec{q}\t$dst", []>, LOCK;
}

// Atomic compare and swap.
let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX],
    isCodeGenOnly = 1 in {
def LCMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$ptr),
                   "lock\n\t"
                   "cmpxchg8b\t$ptr",
                   [(X86cas8 addr:$ptr)]>, TB, LOCK;
}
let Defs = [AL, EFLAGS], Uses = [AL], isCodeGenOnly = 1 in {
def LCMPXCHG8 : I<0xB0, MRMDestMem, (outs), (ins i8mem:$ptr, GR8:$swap),
                  "lock\n\t"
                  "cmpxchg{b}\t{$swap, $ptr|$ptr, $swap}",
                  [(X86cas addr:$ptr, GR8:$swap, 1)]>, TB, LOCK;
}

let Defs = [AX, EFLAGS], Uses = [AX], isCodeGenOnly = 1 in {
def LCMPXCHG16 : I<0xB1, MRMDestMem, (outs), (ins i16mem:$ptr, GR16:$swap),
                   "lock\n\t"
                   "cmpxchg{w}\t{$swap, $ptr|$ptr, $swap}",
                   [(X86cas addr:$ptr, GR16:$swap, 2)]>, TB, OpSize, LOCK;
}

let Defs = [EAX, EFLAGS], Uses = [EAX], isCodeGenOnly = 1 in {
def LCMPXCHG32 : I<0xB1, MRMDestMem, (outs), (ins i32mem:$ptr, GR32:$swap),
                   "lock\n\t"
                   "cmpxchg{l}\t{$swap, $ptr|$ptr, $swap}",
                   [(X86cas addr:$ptr, GR32:$swap, 4)]>, TB, LOCK;
}

let Defs = [RAX, EFLAGS], Uses = [RAX], isCodeGenOnly = 1 in {
def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap),
                    "lock\n\t"
                    "cmpxchgq\t$swap,$ptr",
                    [(X86cas addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
}

// Atomic exchange and add
let Constraints = "$val = $dst", Defs = [EFLAGS], isCodeGenOnly = 1 in {
def LXADD8  : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins GR8:$val, i8mem:$ptr),
                "lock\n\t"
                "xadd{b}\t{$val, $ptr|$ptr, $val}",
                [(set GR8:$dst, (atomic_load_add_8 addr:$ptr, GR8:$val))]>,
                TB, LOCK;
def LXADD16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins GR16:$val, i16mem:$ptr),
                "lock\n\t"
                "xadd{w}\t{$val, $ptr|$ptr, $val}",
                [(set GR16:$dst, (atomic_load_add_16 addr:$ptr, GR16:$val))]>,
                TB, OpSize, LOCK;
def LXADD32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins GR32:$val, i32mem:$ptr),
                "lock\n\t"
                "xadd{l}\t{$val, $ptr|$ptr, $val}",
                [(set GR32:$dst, (atomic_load_add_32 addr:$ptr, GR32:$val))]>,
                TB, LOCK;
def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins GR64:$val,i64mem:$ptr),
                 "lock\n\t"
                 "xadd\t$val, $ptr",
                 [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>,
                 TB, LOCK;
}
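
// For example, an atomic fetch-and-add such as __sync_fetch_and_add(p, v)
// selects to one of the LXADD forms above: "lock xadd %val, (ptr)" adds
// $val into memory and leaves the *previous* memory value in $dst.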

//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions.
//===----------------------------------------------------------------------===//


// CMOV* - Used to implement the SSE SELECT DAG operation.  Expanded after
// instruction selection into a branch sequence.
let Uses = [EFLAGS], usesCustomInserter = 1 in {
  def CMOV_FR32 : I<0, Pseudo,
                    (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
                    "#CMOV_FR32 PSEUDO!",
                    [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_FR64 : I<0, Pseudo,
                    (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
                    "#CMOV_FR64 PSEUDO!",
                    [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_V4F32 : I<0, Pseudo,
                     (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                     "#CMOV_V4F32 PSEUDO!",
                     [(set VR128:$dst,
                        (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                        EFLAGS)))]>;
  def CMOV_V2F64 : I<0, Pseudo,
                     (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                     "#CMOV_V2F64 PSEUDO!",
                     [(set VR128:$dst,
                        (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                        EFLAGS)))]>;
  def CMOV_V2I64 : I<0, Pseudo,
                     (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                     "#CMOV_V2I64 PSEUDO!",
                     [(set VR128:$dst,
                        (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                        EFLAGS)))]>;
}


//===----------------------------------------------------------------------===//
// DAG Pattern Matching Rules
//===----------------------------------------------------------------------===//

// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable
def : Pat<(i32 (X86Wrapper tconstpool  :$dst)), (MOV32ri tconstpool  :$dst)>;
def : Pat<(i32 (X86Wrapper tjumptable  :$dst)), (MOV32ri tjumptable  :$dst)>;
def : Pat<(i32 (X86Wrapper tglobaltlsaddr:$dst)),(MOV32ri tglobaltlsaddr:$dst)>;
def : Pat<(i32 (X86Wrapper tglobaladdr :$dst)), (MOV32ri tglobaladdr :$dst)>;
def : Pat<(i32 (X86Wrapper texternalsym:$dst)), (MOV32ri texternalsym:$dst)>;
def : Pat<(i32 (X86Wrapper tblockaddress:$dst)), (MOV32ri tblockaddress:$dst)>;

def : Pat<(add GR32:$src1, (X86Wrapper tconstpool:$src2)),
          (ADD32ri GR32:$src1, tconstpool:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tjumptable:$src2)),
          (ADD32ri GR32:$src1, tjumptable:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tglobaladdr :$src2)),
          (ADD32ri GR32:$src1, tglobaladdr:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper texternalsym:$src2)),
          (ADD32ri GR32:$src1, texternalsym:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tblockaddress:$src2)),
          (ADD32ri GR32:$src1, tblockaddress:$src2)>;

def : Pat<(store (i32 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV32mi addr:$dst, tglobaladdr:$src)>;
def : Pat<(store (i32 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV32mi addr:$dst, texternalsym:$src)>;
def : Pat<(store (i32 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV32mi addr:$dst, tblockaddress:$src)>;



// ConstantPool GlobalAddress, ExternalSymbol, and JumpTable, when not in small
// code model, should use 'movabs'.  FIXME: This is really a hack; the 'movabs'
// predicate should handle this sort of thing.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri tconstpool  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri tjumptable  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri tglobaladdr :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri texternalsym:$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri tblockaddress:$dst)>, Requires<[FarData]>;

// In static codegen with small code model, we can get the address of a label
// into a register with 'movl'.  FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri64i32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri64i32 tconstpool  :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri64i32 tjumptable  :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri64i32 tglobaladdr :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri64i32 texternalsym:$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri64i32 tblockaddress:$dst)>, Requires<[SmallCode]>;

// In kernel code model, we can get the address of a label
// into a register with 'movq'.  FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri32 tconstpool  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri32 tjumptable  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;

// With the small code model and -static, it is safe to store 64-bit global
// addresses directly as immediates.  FIXME: This is really a hack, the 'imm'
// predicate for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tblockaddress:$src)>,
          Requires<[NearData, IsStatic]>;



// Calls

// tls has some funny stuff here...
// This corresponds to movabs $foo@tpoff, %rax
def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
          (MOV64ri tglobaltlsaddr :$dst)>;
// This corresponds to add $foo@tpoff, %rax
def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
          (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;
// This corresponds to mov foo@tpoff(%rbx), %eax
def : Pat<(load (i64 (X86Wrapper tglobaltlsaddr :$dst))),
          (MOV64rm tglobaltlsaddr :$dst)>;


// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>, Requires<[NotWin64]>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>, Requires<[NotWin64]>;

def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (WINCALL64pcrel32 tglobaladdr:$dst)>, Requires<[IsWin64]>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (WINCALL64pcrel32 texternalsym:$dst)>, Requires<[IsWin64]>;

// tailcall stuff
def : Pat<(X86tcret GR32_TC:$dst, imm:$off),
          (TCRETURNri GR32_TC:$dst, imm:$off)>,
          Requires<[In32BitMode]>;

// FIXME: This is disabled for 32-bit PIC mode because the global base
// register which is part of the address mode may be assigned a
// callee-saved register.
def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi addr:$dst, imm:$off)>,
          Requires<[In32BitMode, IsNotPIC]>;

def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi tglobaladdr:$dst, imm:$off)>,
          Requires<[In32BitMode]>;

def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
          (TCRETURNdi texternalsym:$dst, imm:$off)>,
          Requires<[In32BitMode]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (TCRETURNri64 ptr_rc_tailcall:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi64 addr:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
          (TCRETURNdi64 texternalsym:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

// Normal calls, with various flavors of addresses.
def : Pat<(X86call (i32 tglobaladdr:$dst)),
          (CALLpcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i32 texternalsym:$dst)),
          (CALLpcrel32 texternalsym:$dst)>;
def : Pat<(X86call (i32 imm:$dst)),
          (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;

// Comparisons.

// TEST R,R is smaller than CMP R,0
def : Pat<(X86cmp GR8:$src1, 0),
          (TEST8rr GR8:$src1, GR8:$src1)>;
def : Pat<(X86cmp GR16:$src1, 0),
          (TEST16rr GR16:$src1, GR16:$src1)>;
def : Pat<(X86cmp GR32:$src1, 0),
          (TEST32rr GR32:$src1, GR32:$src1)>;
def : Pat<(X86cmp GR64:$src1, 0),
          (TEST64rr GR64:$src1, GR64:$src1)>;
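
// For example, "testl %eax, %eax" encodes in 2 bytes (85 C0), while
// "cmpl $0, %eax" needs an immediate byte on top of the opcode and ModRM
// (83 F8 00), so the test form is strictly smaller.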

// Conditional moves with folded loads with operands swapped and conditions
// inverted.
multiclass CMOVmr<PatLeaf InvertedCond, Instruction Inst16, Instruction Inst32,
                  Instruction Inst64> {
  def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, InvertedCond, EFLAGS),
            (Inst16 GR16:$src2, addr:$src1)>;
  def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, InvertedCond, EFLAGS),
            (Inst32 GR32:$src2, addr:$src1)>;
  def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, InvertedCond, EFLAGS),
            (Inst64 GR64:$src2, addr:$src1)>;
}

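// For example, the first defm below instantiates patterns like
//   (X86cmov (loadi32 addr:$m), GR32:$r, X86_COND_B, EFLAGS)
//     -> (CMOVAE32rm GR32:$r, addr:$m)
// CMOVcc can only fold a load as the value copied when the condition holds,
// so the condition is inverted (B -> AE) to put the loaded value there.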
defm : CMOVmr<X86_COND_B , CMOVAE16rm, CMOVAE32rm, CMOVAE64rm>;
defm : CMOVmr<X86_COND_AE, CMOVB16rm , CMOVB32rm , CMOVB64rm>;
defm : CMOVmr<X86_COND_E , CMOVNE16rm, CMOVNE32rm, CMOVNE64rm>;
defm : CMOVmr<X86_COND_NE, CMOVE16rm , CMOVE32rm , CMOVE64rm>;
defm : CMOVmr<X86_COND_BE, CMOVA16rm , CMOVA32rm , CMOVA64rm>;
defm : CMOVmr<X86_COND_A , CMOVBE16rm, CMOVBE32rm, CMOVBE64rm>;
defm : CMOVmr<X86_COND_L , CMOVGE16rm, CMOVGE32rm, CMOVGE64rm>;
defm : CMOVmr<X86_COND_GE, CMOVL16rm , CMOVL32rm , CMOVL64rm>;
defm : CMOVmr<X86_COND_LE, CMOVG16rm , CMOVG32rm , CMOVG64rm>;
defm : CMOVmr<X86_COND_G , CMOVLE16rm, CMOVLE32rm, CMOVLE64rm>;
defm : CMOVmr<X86_COND_P , CMOVNP16rm, CMOVNP32rm, CMOVNP64rm>;
defm : CMOVmr<X86_COND_NP, CMOVP16rm , CMOVP32rm , CMOVP64rm>;
defm : CMOVmr<X86_COND_S , CMOVNS16rm, CMOVNS32rm, CMOVNS64rm>;
defm : CMOVmr<X86_COND_NS, CMOVS16rm , CMOVS32rm , CMOVS64rm>;
defm : CMOVmr<X86_COND_O , CMOVNO16rm, CMOVNO32rm, CMOVNO64rm>;
defm : CMOVmr<X86_COND_NO, CMOVO16rm , CMOVO32rm , CMOVO64rm>;

// zextload bool -> zextload byte
def : Pat<(zextloadi8i1  addr:$src), (MOV8rm     addr:$src)>;
def : Pat<(zextloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;

// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.

def : Pat<(extloadi8i1 addr:$src),   (MOV8rm      addr:$src)>;
def : Pat<(extloadi16i1 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i1 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi16i8 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i8 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;

def : Pat<(extloadi64i1 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i8 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
// For other extloads, use subregs, since the high contents of the register are
// defined after an extload.
def : Pat<(extloadi64i32 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src),
                         sub_32bit)>;

// anyext. Define these to do an explicit zero-extend to
// avoid partial-register updates.
def : Pat<(i16 (anyext GR8 :$src)), (MOVZX16rr8  GR8 :$src)>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8  GR8 :$src)>;

// Except for i16 -> i32, since isel expects i16 ops to be promoted to i32.
def : Pat<(i32 (anyext GR16:$src)),
          (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;

def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8  GR8  :$src)>;
def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16 :$src)>;
def : Pat<(i64 (anyext GR32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;


// Any instruction that defines a 32-bit result zeroes the high half of the
// 64-bit register, with a few exceptions that def32 filters out below:
// Truncate can be lowered to EXTRACT_SUBREG, CopyFromReg may be copying from
// a truncate, and x86's cmov doesn't do anything if the condition is false.
// But any other 32-bit operation will zero-extend up to 64 bits.
def def32 : PatLeaf<(i32 GR32:$src), [{
  return N->getOpcode() != ISD::TRUNCATE &&
         N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg &&
         N->getOpcode() != X86ISD::CMOV;
}]>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
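
// For example, (i64 (zext (add GR32:$a, GR32:$b))) can be selected as a plain
// "addl" followed by SUBREG_TO_REG, with no explicit zero-extending move,
// because the 32-bit add already cleared the upper 32 bits.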

//===----------------------------------------------------------------------===//
// Pattern match OR as ADD
//===----------------------------------------------------------------------===//

// If safe, we prefer to pattern match OR as ADD at isel time. ADD can be
// 3-addressified into an LEA instruction to avoid copies.  However, we also
// want to finally emit these instructions as an or at the end of the code
// generator to make the generated code easier to read.  To do this, we select
// into "disjoint bits" pseudo ops.

// Treat an 'or' node as an 'add' if the or'ed bits are known to be zero.
def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());

  unsigned BitWidth = N->getValueType(0).getScalarType().getSizeInBits();
  APInt Mask = APInt::getAllOnesValue(BitWidth);
  APInt KnownZero0, KnownOne0;
  CurDAG->ComputeMaskedBits(N->getOperand(0), Mask, KnownZero0, KnownOne0, 0);
  APInt KnownZero1, KnownOne1;
  CurDAG->ComputeMaskedBits(N->getOperand(1), Mask, KnownZero1, KnownOne1, 0);
  return (~KnownZero0 & ~KnownZero1) == 0;
}]>;


// (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.
let AddedComplexity = 5 in { // Try this before selecting to OR

let isConvertibleToThreeAddress = 1,
    Constraints = "$src1 = $dst", Defs = [EFLAGS] in {
let isCommutable = 1 in {
def ADD16rr_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                    "", // orw/addw REG, REG
                    [(set GR16:$dst, (or_is_add GR16:$src1, GR16:$src2))]>;
def ADD32rr_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                    "", // orl/addl REG, REG
                    [(set GR32:$dst, (or_is_add GR32:$src1, GR32:$src2))]>;
def ADD64rr_DB  : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "", // orq/addq REG, REG
                    [(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;
} // isCommutable

// NOTE: These are order specific; we want the ri8 forms to be listed
// first so that they are slightly preferred to the ri forms.

def ADD16ri8_DB : I<0, Pseudo,
                    (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
                    "", // orw/addw REG, imm8
                    [(set GR16:$dst,(or_is_add GR16:$src1,i16immSExt8:$src2))]>;
def ADD16ri_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
                    "", // orw/addw REG, imm
                    [(set GR16:$dst, (or_is_add GR16:$src1, imm:$src2))]>;

def ADD32ri8_DB : I<0, Pseudo,
                    (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
                    "", // orl/addl REG, imm8
                    [(set GR32:$dst,(or_is_add GR32:$src1,i32immSExt8:$src2))]>;
def ADD32ri_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
                    "", // orl/addl REG, imm
                    [(set GR32:$dst, (or_is_add GR32:$src1, imm:$src2))]>;


def ADD64ri8_DB : I<0, Pseudo,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "", // orq/addq REG, imm8
                    [(set GR64:$dst, (or_is_add GR64:$src1,
                                                i64immSExt8:$src2))]>;
def ADD64ri32_DB : I<0, Pseudo,
                     (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                     "", // orq/addq REG, imm
                     [(set GR64:$dst, (or_is_add GR64:$src1,
                                                 i64immSExt32:$src2))]>;
}
} // AddedComplexity


//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//

// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
def : Pat<(add GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
          (SUB16mi8 addr:$dst, -128)>;

def : Pat<(add GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
          (SUB32mi8 addr:$dst, -128)>;

def : Pat<(add GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi8 addr:$dst, -128)>;
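
// Why this helps, in short: the signed 8-bit immediate range is [-128, 127],
// so +128 would force the full-width immediate encoding, while -128 still
// fits in imm8 and x + 128 == x - (-128) in two's complement.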

// The same trick applies for 32-bit immediate fields in 64-bit
// instructions.
def : Pat<(add GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;
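
// Here 0x80000000 is just above the sign-extended 32-bit immediate range, but
// its negation sign-extends from 32 bits as 0xffffffff80000000 and therefore
// still encodes as an imm32.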

// To avoid needing to materialize an immediate in a register, use a 32-bit and
// with implicit zero-extension instead of a 64-bit and if the immediate has at
// least 32 bits of leading zeros. If, in addition, the low 32 bits can be
// represented as the sign extension of an 8-bit constant, use that form.

def : Pat<(and GR64:$src, i64immZExt32SExt8:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri8
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo8XForm imm:$imm))),
            sub_32bit)>;

def : Pat<(and GR64:$src, i64immZExt32:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;
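
// This is sound because writing a 32-bit register on x86-64 implicitly
// zero-extends into the upper 32 bits. For instance (using %rax purely as an
// example), x & 0x0000000080000000 cannot use a sign-extended imm32 with a
// 64-bit and, but "andl $0x80000000, %eax" computes the same 64-bit value.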

// r & (2^16-1) ==> movz
def : Pat<(and GR32:$src1, 0xffff),
          (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src1,
                                                             GR32_ABCD)),
                                      sub_8bit))>,
      Requires<[In32BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (MOVZX16rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src1,
                                                             GR16_ABCD)),
                                      sub_8bit))>,
      Requires<[In32BitMode]>;

// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
          (MOVZX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
// r & (2^16-1) ==> movz
def : Pat<(and GR64:$src, 0xffff),
          (MOVZX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit)))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
          (MOVZX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit)))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>,
      Requires<[In64BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (MOVZX16rr8 (i8 (EXTRACT_SUBREG GR16:$src1, sub_8bit)))>,
      Requires<[In64BitMode]>;
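
// For instance, "x & 0xffff" on a 32-bit register can be emitted as
// "movzwl %ax, %eax" rather than "andl $0xffff, %eax" (register chosen only
// for illustration); unlike and, movz leaves EFLAGS untouched.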

// sext_inreg patterns
def : Pat<(sext_inreg GR32:$src, i16),
          (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit))>,
      Requires<[In32BitMode]>;
def : Pat<(sext_inreg GR16:$src, i8),
          (MOVSX16rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit))>,
      Requires<[In32BitMode]>;

def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
def : Pat<(sext_inreg GR64:$src, i16),
          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>,
      Requires<[In64BitMode]>;
def : Pat<(sext_inreg GR16:$src, i8),
          (MOVSX16rr8 (i8 (EXTRACT_SUBREG GR16:$src, sub_8bit)))>,
      Requires<[In64BitMode]>;
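
// A sign_extend_inreg node arises when a narrower value lives in a wider
// register; e.g. (sext_inreg GR32:$src, i8) re-sign-extends the low byte in
// place and, in 64-bit mode, selects directly to movsbl since every GR32
// register has an addressable low-8-bit subregister there.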

// trunc patterns
def : Pat<(i16 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit)>,
      Requires<[In32BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit)>,
      Requires<[In32BitMode]>;
def : Pat<(i32 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
def : Pat<(i16 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
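
// Note that truncation needs no instruction at all: it is a plain subregister
// extract. The COPY_TO_REGCLASS to GR32_ABCD/GR16_ABCD in the 32-bit-mode
// patterns is required because only EAX/EBX/ECX/EDX have an addressable
// low-8-bit subregister in that mode; in 64-bit mode a REX prefix makes every
// GPR's low byte addressable, so the plain EXTRACT_SUBREG suffices.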

// h-register tricks
def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit_hi)>,
      Requires<[In32BitMode]>;
def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit_hi)>,
      Requires<[In32BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32rr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_16bit)>,
      Requires<[In32BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[In32BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[In32BitMode]>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[In32BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[In32BitMode]>;

// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if the
// value is immediately zero-extended or stored, which are somewhat common
// cases. The _NOREX forms below exist to prevent a register that requires a
// REX prefix from being allocated in the same instruction as the h-register,
// as there's currently no way to describe this requirement to the register
// allocator.

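// Encoding background for the _NOREX constraint: the AH/BH/CH/DH register
// encodings are only valid in instructions without a REX prefix; with a REX
// prefix those same encodings select SPL/BPL/SIL/DIL instead. An h-register
// therefore must not appear in an instruction whose other operands need REX.
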
// h-register extract and zero-extend.
def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32_NOREXrr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                                   GR32_ABCD)),
                                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_16bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                            sub_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;

// (shl x, 1) ==> (add x, x)
def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr  GR8 :$src1, GR8 :$src1)>;
def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
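
// The two forms agree for all inputs, since a left shift by one doubles the
// value modulo 2^width. One practical benefit of the add form in this backend
// is that ADDrr is commutable and convertible to a three-address LEA, which
// gives later passes more freedom than a shift would.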

// (shl x (and y, 31)) ==> (shl x, y)
def : Pat<(shl GR8:$src1, (and CL, 31)),
          (SHL8rCL GR8:$src1)>;
def : Pat<(shl GR16:$src1, (and CL, 31)),
          (SHL16rCL GR16:$src1)>;
def : Pat<(shl GR32:$src1, (and CL, 31)),
          (SHL32rCL GR32:$src1)>;
def : Pat<(store (shl (loadi8 addr:$dst), (and CL, 31)), addr:$dst),
          (SHL8mCL addr:$dst)>;
def : Pat<(store (shl (loadi16 addr:$dst), (and CL, 31)), addr:$dst),
          (SHL16mCL addr:$dst)>;
def : Pat<(store (shl (loadi32 addr:$dst), (and CL, 31)), addr:$dst),
          (SHL32mCL addr:$dst)>;

def : Pat<(srl GR8:$src1, (and CL, 31)),
          (SHR8rCL GR8:$src1)>;
def : Pat<(srl GR16:$src1, (and CL, 31)),
          (SHR16rCL GR16:$src1)>;
def : Pat<(srl GR32:$src1, (and CL, 31)),
          (SHR32rCL GR32:$src1)>;
def : Pat<(store (srl (loadi8 addr:$dst), (and CL, 31)), addr:$dst),
          (SHR8mCL addr:$dst)>;
def : Pat<(store (srl (loadi16 addr:$dst), (and CL, 31)), addr:$dst),
          (SHR16mCL addr:$dst)>;
def : Pat<(store (srl (loadi32 addr:$dst), (and CL, 31)), addr:$dst),
          (SHR32mCL addr:$dst)>;

def : Pat<(sra GR8:$src1, (and CL, 31)),
          (SAR8rCL GR8:$src1)>;
def : Pat<(sra GR16:$src1, (and CL, 31)),
          (SAR16rCL GR16:$src1)>;
def : Pat<(sra GR32:$src1, (and CL, 31)),
          (SAR32rCL GR32:$src1)>;
def : Pat<(store (sra (loadi8 addr:$dst), (and CL, 31)), addr:$dst),
          (SAR8mCL addr:$dst)>;
def : Pat<(store (sra (loadi16 addr:$dst), (and CL, 31)), addr:$dst),
          (SAR16mCL addr:$dst)>;
def : Pat<(store (sra (loadi32 addr:$dst), (and CL, 31)), addr:$dst),
          (SAR32mCL addr:$dst)>;

// (shl x (and y, 63)) ==> (shl x, y)
def : Pat<(shl GR64:$src1, (and CL, 63)),
          (SHL64rCL GR64:$src1)>;
def : Pat<(store (shl (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
          (SHL64mCL addr:$dst)>;

def : Pat<(srl GR64:$src1, (and CL, 63)),
          (SHR64rCL GR64:$src1)>;
def : Pat<(store (srl (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
          (SHR64mCL addr:$dst)>;

def : Pat<(sra GR64:$src1, (and CL, 63)),
          (SAR64rCL GR64:$src1)>;
def : Pat<(store (sra (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
          (SAR64mCL addr:$dst)>;
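
// Dropping the 'and' here is safe because the hardware masks the shift count
// itself: x86 shifts use only the low 5 bits of CL (the low 6 bits for 64-bit
// operands), which is exactly what the 31 and 63 masks above express.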

// (anyext (setcc_carry)) -> (setcc_carry)
def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i32 (anyext (i16 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
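
// This works because the SETB_C pseudos materialize 0 or all-ones from the
// carry flag (sbb-style, subtracting a register from itself), so the wider
// register already holds a correct value and the extension costs nothing.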


//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//

// add reg, reg
def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>;
def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>;

// add reg, mem
def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
          (ADD8rm GR8:$src1, addr:$src2)>;
def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
          (ADD16rm GR16:$src1, addr:$src2)>;
def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
          (ADD32rm GR32:$src1, addr:$src2)>;

// add reg, imm
def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri  GR8:$src1,  imm:$src2)>;
def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;
def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, i16immSExt8:$src2),
          (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(add GR32:$src1, i32immSExt8:$src2),
          (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;

// sub reg, reg
def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>;
def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>;

// sub reg, mem
def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
          (SUB8rm GR8:$src1, addr:$src2)>;
def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
          (SUB16rm GR16:$src1, addr:$src2)>;
def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
          (SUB32rm GR32:$src1, addr:$src2)>;

// sub reg, imm
def : Pat<(sub GR8:$src1, imm:$src2),
          (SUB8ri GR8:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, imm:$src2),
          (SUB16ri GR16:$src1, imm:$src2)>;
def : Pat<(sub GR32:$src1, imm:$src2),
          (SUB32ri GR32:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, i16immSExt8:$src2),
          (SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(sub GR32:$src1, i32immSExt8:$src2),
          (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;

// mul reg, reg
def : Pat<(mul GR16:$src1, GR16:$src2),
          (IMUL16rr GR16:$src1, GR16:$src2)>;
def : Pat<(mul GR32:$src1, GR32:$src2),
          (IMUL32rr GR32:$src1, GR32:$src2)>;

// mul reg, mem
def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
          (IMUL16rm GR16:$src1, addr:$src2)>;
def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
          (IMUL32rm GR32:$src1, addr:$src2)>;

// mul reg, imm
def : Pat<(mul GR16:$src1, imm:$src2),
          (IMUL16rri GR16:$src1, imm:$src2)>;
def : Pat<(mul GR32:$src1, imm:$src2),
          (IMUL32rri GR32:$src1, imm:$src2)>;
def : Pat<(mul GR16:$src1, i16immSExt8:$src2),
          (IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(mul GR32:$src1, i32immSExt8:$src2),
          (IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>;

// reg = mul mem, imm
def : Pat<(mul (loadi16 addr:$src1), imm:$src2),
          (IMUL16rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
          (IMUL32rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi16 addr:$src1), i16immSExt8:$src2),
          (IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), i32immSExt8:$src2),
          (IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>;

// Optimize multiply by 2 with EFLAGS result.
let AddedComplexity = 2 in {
def : Pat<(X86smul_flag GR16:$src1, 2), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(X86smul_flag GR32:$src1, 2), (ADD32rr GR32:$src1, GR32:$src1)>;
}
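
// A quick justification: x + x and x * 2 agree on the result for every x, and
// the add's signed-overflow flag (OF) is set exactly when the signed doubling
// overflows, which is what consumers of the smul flag test.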

// Patterns for nodes that do not produce flags, selected to instructions
// that do.

// addition
def : Pat<(add GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;

// subtraction
def : Pat<(sub GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Multiply
def : Pat<(mul GR64:$src1, GR64:$src2),
          (IMUL64rr GR64:$src1, GR64:$src2)>;
def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
          (IMUL64rm GR64:$src1, addr:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt8:$src2),
          (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
          (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;

// Increment reg.
def : Pat<(add GR8 :$src, 1), (INC8r GR8 :$src)>;
def : Pat<(add GR16:$src, 1), (INC16r GR16:$src)>, Requires<[In32BitMode]>;
def : Pat<(add GR16:$src, 1), (INC64_16r GR16:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR32:$src, 1), (INC32r GR32:$src)>, Requires<[In32BitMode]>;
def : Pat<(add GR32:$src, 1), (INC64_32r GR32:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR64:$src, 1), (INC64r GR64:$src)>;

// Decrement reg.
def : Pat<(add GR8 :$src, -1), (DEC8r GR8 :$src)>;
def : Pat<(add GR16:$src, -1), (DEC16r GR16:$src)>, Requires<[In32BitMode]>;
def : Pat<(add GR16:$src, -1), (DEC64_16r GR16:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR32:$src, -1), (DEC32r GR32:$src)>, Requires<[In32BitMode]>;
def : Pat<(add GR32:$src, -1), (DEC64_32r GR32:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;
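
// The separate INC64_16r/INC64_32r and DEC64_16r/DEC64_32r forms exist because
// the short one-byte inc/dec encodings (0x40-0x4F) are reused as REX prefixes
// in 64-bit mode, leaving only the longer ModRM-based forms available there.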

// or reg/reg.
def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(or GR16:$src1, GR16:$src2), (OR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(or GR32:$src1, GR32:$src2), (OR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(or GR64:$src1, GR64:$src2), (OR64rr GR64:$src1, GR64:$src2)>;

// or reg/mem
def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
          (OR8rm GR8:$src1, addr:$src2)>;
def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
          (OR16rm GR16:$src1, addr:$src2)>;
def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
          (OR32rm GR32:$src1, addr:$src2)>;
def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
          (OR64rm GR64:$src1, addr:$src2)>;

// or reg/imm
def : Pat<(or GR8 :$src1, imm:$src2), (OR8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>;
def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, i16immSExt8:$src2),
          (OR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(or GR32:$src1, i32immSExt8:$src2),
          (OR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt8:$src2),
          (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt32:$src2),
          (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// xor reg/reg
def : Pat<(xor GR8 :$src1, GR8 :$src2), (XOR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(xor GR16:$src1, GR16:$src2), (XOR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(xor GR32:$src1, GR32:$src2), (XOR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(xor GR64:$src1, GR64:$src2), (XOR64rr GR64:$src1, GR64:$src2)>;

// xor reg/mem
def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
          (XOR8rm GR8:$src1, addr:$src2)>;
def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),
          (XOR16rm GR16:$src1, addr:$src2)>;
def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),
          (XOR32rm GR32:$src1, addr:$src2)>;
def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
          (XOR64rm GR64:$src1, addr:$src2)>;

// xor reg/imm
def : Pat<(xor GR8:$src1, imm:$src2),
          (XOR8ri GR8:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, imm:$src2),
          (XOR16ri GR16:$src1, imm:$src2)>;
def : Pat<(xor GR32:$src1, imm:$src2),
          (XOR32ri GR32:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, i16immSExt8:$src2),
          (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(xor GR32:$src1, i32immSExt8:$src2),
          (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// and reg/reg
def : Pat<(and GR8 :$src1, GR8 :$src2), (AND8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(and GR16:$src1, GR16:$src2), (AND16rr GR16:$src1, GR16:$src2)>;
def : Pat<(and GR32:$src1, GR32:$src2), (AND32rr GR32:$src1, GR32:$src2)>;
def : Pat<(and GR64:$src1, GR64:$src2), (AND64rr GR64:$src1, GR64:$src2)>;

// and reg/mem
def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),
          (AND8rm GR8:$src1, addr:$src2)>;
def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),
          (AND16rm GR16:$src1, addr:$src2)>;
def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),
          (AND32rm GR32:$src1, addr:$src2)>;
def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
          (AND64rm GR64:$src1, addr:$src2)>;

// and reg/imm
def : Pat<(and GR8:$src1, imm:$src2),
          (AND8ri GR8:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, imm:$src2),
          (AND16ri GR16:$src1, imm:$src2)>;
def : Pat<(and GR32:$src1, imm:$src2),
          (AND32ri GR32:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, i16immSExt8:$src2),
          (AND16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(and GR32:$src1, i32immSExt8:$src2),
          (AND32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt8:$src2),
          (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt32:$src2),
          (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;