//===- X86InstrCompiler.td - Compiler Pseudos and Patterns -*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the various pseudo instructions used by the compiler,
// as well as Pat patterns used during instruction selection.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Pattern Matching Support

def GetLo32XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 32 bits.
  return getI32Imm((unsigned)N->getZExtValue());
}]>;

def GetLo8XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 8 bits.
  return getI8Imm((uint8_t)N->getZExtValue());
}]>;
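// For example, given the i64 immediate 0x123456789, GetLo32XForm produces
// the i32 0x23456789 and GetLo8XForm produces the i8 0x89.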


//===----------------------------------------------------------------------===//
// Random Pseudo Instructions.

// PIC base construction.  This expands to code that looks like this:
//     call  $next_inst
//     popl  %destreg
let neverHasSideEffects = 1, isNotDuplicable = 1, Uses = [ESP] in
  def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
                      "", []>;


// ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [ESP, EFLAGS], Uses = [ESP] in {
def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs), (ins i32imm:$amt),
                           "#ADJCALLSTACKDOWN",
                           [(X86callseq_start timm:$amt)]>,
                          Requires<[In32BitMode]>;
def ADJCALLSTACKUP32   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                          Requires<[In32BitMode]>;
}
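// For example, a call taking 12 bytes of outgoing arguments is bracketed by
// ADJCALLSTACKDOWN32 12 and a matching ADJCALLSTACKUP32, which are typically
// lowered to "subl $12, %esp" and the corresponding "addl" when the frame
// is finalized.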

// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [RSP, EFLAGS], Uses = [RSP] in {
def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt),
                           "#ADJCALLSTACKDOWN",
                           [(X86callseq_start timm:$amt)]>,
                          Requires<[In64BitMode]>;
def ADJCALLSTACKUP64   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                          Requires<[In64BitMode]>;
}



// x86-64 va_start lowering magic.
let usesCustomInserter = 1 in {
def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
                              (outs),
                              (ins GR8:$al,
                                   i64imm:$regsavefi, i64imm:$offset,
                                   variable_ops),
                              "#VASTART_SAVE_XMM_REGS $al, $regsavefi, $offset",
                              [(X86vastart_save_xmm_regs GR8:$al,
                                                         imm:$regsavefi,
                                                         imm:$offset)]>;

// The VAARG_64 pseudo-instruction takes the address of the va_list,
// and places the address of the next argument into a register.
let Defs = [EFLAGS] in
def VAARG_64 : I<0, Pseudo,
                 (outs GR64:$dst),
                 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
                 "#VAARG_64 $dst, $ap, $size, $mode, $align",
                 [(set GR64:$dst,
                    (X86vaarg64 addr:$ap, imm:$size, imm:$mode, imm:$align)),
                  (implicit EFLAGS)]>;
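// Roughly, for an integer va_arg under the x86-64 SysV ABI this computes the
// next argument's address from the va_list: if gp_offset < 48 the argument
// lives at reg_save_area + gp_offset and gp_offset is bumped by 8; otherwise
// it is taken from overflow_arg_area, which is then advanced.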

// Dynamic stack allocation yields an _alloca call for Cygwin/MinGW targets.
// Calls to _alloca are needed to probe the stack when allocating more than 4K
// bytes in one go.  Touching the stack at 4K increments is necessary to
// ensure that the guard pages used by the OS virtual memory manager are
// allocated in correct sequence.
// The main point of having a separate instruction is the extra unmodelled
// effects it has (compared to an ordinary call), such as changing the stack
// pointer.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
  def MINGW_ALLOCA : I<0, Pseudo, (outs), (ins),
                       "# dynamic stack allocation",
                       [(X86MingwAlloca)]>;
}



//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN   : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR32:$addr)]>;

}

let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN64   : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
                      "ret\t#eh_return, addr: $addr",
                      [(X86ehret GR64:$addr)]>;

}

//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// Alias instructions that map movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
// FIXME: Set encoding to pseudo.
let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
    isCodeGenOnly = 1 in {
def MOV8r0   : I<0x30, MRMInitReg, (outs GR8 :$dst), (ins), "",
                 [(set GR8:$dst, 0)]>;

// We want to rewrite MOV16r0 in terms of MOV32r0, because it's a smaller
// encoding and sometimes avoids a partial-register update, but doing so
// at isel time interferes with rematerialization in the current register
// allocator.  For now, this is rewritten when the instruction is lowered
// to an MCInst.
def MOV16r0   : I<0x31, MRMInitReg, (outs GR16:$dst), (ins),
                  "",
                  [(set GR16:$dst, 0)]>, OpSize;

// FIXME: Set encoding to pseudo.
def MOV32r0  : I<0x31, MRMInitReg, (outs GR32:$dst), (ins), "",
                 [(set GR32:$dst, 0)]>;
}
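// When lowered, these emit "xor reg, reg", which is shorter than moving an
// explicit zero: "xorl %eax, %eax" is 2 bytes versus 5 bytes for
// "movl $0, %eax".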

// We want to rewrite MOV64r0 in terms of MOV32r0, because it's sometimes a
// smaller encoding, but doing so at isel time interferes with
// rematerialization in the current register allocator.  For now, this is
// rewritten when the instruction is lowered to an MCInst.
// FIXME: AddedComplexity gives this a higher priority than MOV64ri32.  Remove
// this when we have a better way to specify isel priority.
let Defs = [EFLAGS],
    AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOV64r0   : I<0x31, MRMInitReg, (outs GR64:$dst), (ins), "",
                  [(set GR64:$dst, 0)]>;

// Materialize i64 constant where top 32 bits are zero.  This could
// theoretically use MOV32ri with a SUBREG_TO_REG to represent the
// zero-extension, however that would make it more difficult to rematerialize.
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
                        "", [(set GR64:$dst, i64immZExt32:$src)]>;


// Use sbb to materialize carry bit.
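// ("sbb reg, reg" computes reg - reg - CF, so the result is 0 when the carry
// flag is clear and all ones when it is set.)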
let Uses = [EFLAGS], Defs = [EFLAGS], isCodeGenOnly = 1 in {
// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
// However, Pat<> can't replicate the destination reg into the inputs of the
// result.
// FIXME: Change these to have encoding Pseudo when X86MCCodeEmitter replaces
// X86CodeEmitter.
def SETB_C8r : I<0x18, MRMInitReg, (outs GR8:$dst), (ins), "",
                 [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C16r : I<0x19, MRMInitReg, (outs GR16:$dst), (ins), "",
                  [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>,
                OpSize;
def SETB_C32r : I<0x19, MRMInitReg, (outs GR32:$dst), (ins), "",
                  [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C64r : RI<0x19, MRMInitReg, (outs GR64:$dst), (ins), "",
                   [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
} // isCodeGenOnly


def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;


//===----------------------------------------------------------------------===//
// String Pseudo Instructions
//
let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
def REP_MOVSB : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
                  [(X86rep_movs i8)]>, REP;
def REP_MOVSW : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
                  [(X86rep_movs i16)]>, REP, OpSize;
def REP_MOVSD : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
                  [(X86rep_movs i32)]>, REP;
}

let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in
def REP_MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
                   [(X86rep_movs i64)]>, REP;
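// "rep movs" copies ECX (or RCX) elements of the given width from [ESI] to
// [EDI]; these pseudos are selected for memcpy-style nodes once the count
// has been placed in the count register.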


// FIXME: Should use "(X86rep_stos AL)" as the pattern.
let Defs = [ECX,EDI], Uses = [AL,ECX,EDI], isCodeGenOnly = 1 in
def REP_STOSB : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
                  [(X86rep_stos i8)]>, REP;
let Defs = [ECX,EDI], Uses = [AX,ECX,EDI], isCodeGenOnly = 1 in
def REP_STOSW : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
                  [(X86rep_stos i16)]>, REP, OpSize;
let Defs = [ECX,EDI], Uses = [EAX,ECX,EDI], isCodeGenOnly = 1 in
def REP_STOSD : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
                  [(X86rep_stos i32)]>, REP;

let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI], isCodeGenOnly = 1 in
def REP_STOSQ : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
                   [(X86rep_stos i64)]>, REP;


//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//

// ELF TLS Support
// All calls clobber the non-callee saved registers. ESP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [ESP] in
def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "leal\t$sym, %eax; "
                   "call\t___tls_get_addr@PLT",
                   [(X86tlsaddr tls32addr:$sym)]>,
                   Requires<[In32BitMode]>;

// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
            FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [RSP] in
def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   ".byte\t0x66; "
                   "leaq\t$sym(%rip), %rdi; "
                   ".word\t0x6666; "
                   "rex64; "
                   "call\t__tls_get_addr@PLT",
                   [(X86tlsaddr tls64addr:$sym)]>,
                   Requires<[In64BitMode]>;
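// The .byte 0x66 / .word 0x6666 / rex64 padding makes this the canonical
// 16-byte general-dynamic TLS sequence, which the linker can recognize and
// relax (e.g. to the local-exec model) at link time.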

// Darwin TLS Support
// For i386, the address of the thunk is passed on the stack; on return, the
// address of the variable is in %eax.  %ecx is trashed during the function
// call.  All other registers are preserved.
let Defs = [EAX, ECX],
    Uses = [ESP],
    usesCustomInserter = 1 in
def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLSCall_32",
                   [(X86TLSCall addr:$sym)]>,
                   Requires<[In32BitMode]>;

// For x86_64, the address of the thunk is passed in %rdi; on return, the
// address of the variable is in %rax.  All other registers are preserved.
let Defs = [RAX],
    Uses = [RDI],
    usesCustomInserter = 1 in
def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLSCall_64",
                   [(X86TLSCall addr:$sym)]>,
                   Requires<[In64BitMode]>;


//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions

let Constraints = "$src1 = $dst" in {

// Conditional moves
let Uses = [EFLAGS] in {

// X86 doesn't have 8-bit conditional moves.  Use a customInserter to
// emit control flow.  An alternative to this is to mark i8 SELECT as Promote;
// however, that requires promoting the operands and can induce additional
// i8 register pressure.  Note that CMOV_GR8 is conservatively considered to
// clobber EFLAGS, because if one of the operands is zero, the expansion
// could involve an xor.
let usesCustomInserter = 1, Constraints = "", Defs = [EFLAGS] in {
def CMOV_GR8 : I<0, Pseudo,
                 (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
                 "#CMOV_GR8 PSEUDO!",
                 [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
                                          imm:$cond, EFLAGS))]>;
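// The custom inserter expands this into a branch diamond: a conditional jump
// around a copy of one operand, with a PHI in the join block choosing between
// $src1 and $src2.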

let Predicates = [NoCMov] in {
def CMOV_GR32 : I<0, Pseudo,
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cond),
                  "#CMOV_GR32* PSEUDO!",
                  [(set GR32:$dst,
                    (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>;
def CMOV_GR16 : I<0, Pseudo,
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cond),
                  "#CMOV_GR16* PSEUDO!",
                  [(set GR16:$dst,
                    (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;
def CMOV_RFP32 : I<0, Pseudo,
                   (outs RFP32:$dst),
                   (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
                   "#CMOV_RFP32 PSEUDO!",
                   [(set RFP32:$dst,
                     (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
                              EFLAGS))]>;
def CMOV_RFP64 : I<0, Pseudo,
                   (outs RFP64:$dst),
                   (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
                   "#CMOV_RFP64 PSEUDO!",
                   [(set RFP64:$dst,
                     (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
                              EFLAGS))]>;
def CMOV_RFP80 : I<0, Pseudo,
                   (outs RFP80:$dst),
                   (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),
                   "#CMOV_RFP80 PSEUDO!",
                   [(set RFP80:$dst,
                     (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,
                              EFLAGS))]>;
} // Predicates = [NoCMov]
} // usesCustomInserter = 1, Constraints = "", Defs = [EFLAGS]
} // Uses = [EFLAGS]

} // Constraints = "$src1 = $dst"


//===----------------------------------------------------------------------===//
// Atomic Instruction Pseudo Instructions
//===----------------------------------------------------------------------===//

// Atomic exchange, and, or, xor
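// Each of these is expanded by the custom inserter into a compare-and-swap
// loop: load the old value, apply the operation in a scratch register,
// attempt a LCMPXCHG, and branch back on failure.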
let Constraints = "$val = $dst", Defs = [EFLAGS],
    usesCustomInserter = 1 in {

def ATOMAND8 : I<0, Pseudo, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val),
               "#ATOMAND8 PSEUDO!",
               [(set GR8:$dst, (atomic_load_and_8 addr:$ptr, GR8:$val))]>;
def ATOMOR8 : I<0, Pseudo, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val),
              "#ATOMOR8 PSEUDO!",
              [(set GR8:$dst, (atomic_load_or_8 addr:$ptr, GR8:$val))]>;
def ATOMXOR8 : I<0, Pseudo, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val),
               "#ATOMXOR8 PSEUDO!",
               [(set GR8:$dst, (atomic_load_xor_8 addr:$ptr, GR8:$val))]>;
def ATOMNAND8 : I<0, Pseudo, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val),
                "#ATOMNAND8 PSEUDO!",
                [(set GR8:$dst, (atomic_load_nand_8 addr:$ptr, GR8:$val))]>;

def ATOMAND16 : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
                "#ATOMAND16 PSEUDO!",
                [(set GR16:$dst, (atomic_load_and_16 addr:$ptr, GR16:$val))]>;
def ATOMOR16 : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
               "#ATOMOR16 PSEUDO!",
               [(set GR16:$dst, (atomic_load_or_16 addr:$ptr, GR16:$val))]>;
def ATOMXOR16 : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
                "#ATOMXOR16 PSEUDO!",
                [(set GR16:$dst, (atomic_load_xor_16 addr:$ptr, GR16:$val))]>;
def ATOMNAND16 : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
                 "#ATOMNAND16 PSEUDO!",
                 [(set GR16:$dst, (atomic_load_nand_16 addr:$ptr, GR16:$val))]>;
def ATOMMIN16 : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
                "#ATOMMIN16 PSEUDO!",
                [(set GR16:$dst, (atomic_load_min_16 addr:$ptr, GR16:$val))]>;
def ATOMMAX16 : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
                "#ATOMMAX16 PSEUDO!",
                [(set GR16:$dst, (atomic_load_max_16 addr:$ptr, GR16:$val))]>;
def ATOMUMIN16 : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
                 "#ATOMUMIN16 PSEUDO!",
                 [(set GR16:$dst, (atomic_load_umin_16 addr:$ptr, GR16:$val))]>;
def ATOMUMAX16 : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
                 "#ATOMUMAX16 PSEUDO!",
                 [(set GR16:$dst, (atomic_load_umax_16 addr:$ptr, GR16:$val))]>;


def ATOMAND32 : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
                "#ATOMAND32 PSEUDO!",
                [(set GR32:$dst, (atomic_load_and_32 addr:$ptr, GR32:$val))]>;
def ATOMOR32 : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
               "#ATOMOR32 PSEUDO!",
               [(set GR32:$dst, (atomic_load_or_32 addr:$ptr, GR32:$val))]>;
def ATOMXOR32 : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
                "#ATOMXOR32 PSEUDO!",
                [(set GR32:$dst, (atomic_load_xor_32 addr:$ptr, GR32:$val))]>;
def ATOMNAND32 : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
                 "#ATOMNAND32 PSEUDO!",
                 [(set GR32:$dst, (atomic_load_nand_32 addr:$ptr, GR32:$val))]>;
def ATOMMIN32 : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
                "#ATOMMIN32 PSEUDO!",
                [(set GR32:$dst, (atomic_load_min_32 addr:$ptr, GR32:$val))]>;
def ATOMMAX32 : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
                "#ATOMMAX32 PSEUDO!",
                [(set GR32:$dst, (atomic_load_max_32 addr:$ptr, GR32:$val))]>;
def ATOMUMIN32 : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
                 "#ATOMUMIN32 PSEUDO!",
                 [(set GR32:$dst, (atomic_load_umin_32 addr:$ptr, GR32:$val))]>;
def ATOMUMAX32 : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
                 "#ATOMUMAX32 PSEUDO!",
                 [(set GR32:$dst, (atomic_load_umax_32 addr:$ptr, GR32:$val))]>;



def ATOMAND64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                "#ATOMAND64 PSEUDO!",
                [(set GR64:$dst, (atomic_load_and_64 addr:$ptr, GR64:$val))]>;
def ATOMOR64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
               "#ATOMOR64 PSEUDO!",
               [(set GR64:$dst, (atomic_load_or_64 addr:$ptr, GR64:$val))]>;
def ATOMXOR64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                "#ATOMXOR64 PSEUDO!",
                [(set GR64:$dst, (atomic_load_xor_64 addr:$ptr, GR64:$val))]>;
def ATOMNAND64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                 "#ATOMNAND64 PSEUDO!",
                 [(set GR64:$dst, (atomic_load_nand_64 addr:$ptr, GR64:$val))]>;
def ATOMMIN64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                "#ATOMMIN64 PSEUDO!",
                [(set GR64:$dst, (atomic_load_min_64 addr:$ptr, GR64:$val))]>;
def ATOMMAX64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                "#ATOMMAX64 PSEUDO!",
                [(set GR64:$dst, (atomic_load_max_64 addr:$ptr, GR64:$val))]>;
def ATOMUMIN64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                 "#ATOMUMIN64 PSEUDO!",
                 [(set GR64:$dst, (atomic_load_umin_64 addr:$ptr, GR64:$val))]>;
def ATOMUMAX64 : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val),
                 "#ATOMUMAX64 PSEUDO!",
                 [(set GR64:$dst, (atomic_load_umax_64 addr:$ptr, GR64:$val))]>;
}

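// 64-bit atomic read-modify-write ops on 32-bit targets: these are expanded
// into a LCMPXCHG8B loop, with the expected value in EDX:EAX and the
// replacement value in ECX:EBX.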
let Constraints = "$val1 = $dst1, $val2 = $dst2",
    Defs = [EFLAGS, EAX, EBX, ECX, EDX],
    Uses = [EAX, EBX, ECX, EDX],
    mayLoad = 1, mayStore = 1,
    usesCustomInserter = 1 in {
def ATOMAND6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                    (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMAND6432 PSEUDO!", []>;
def ATOMOR6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                   (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                   "#ATOMOR6432 PSEUDO!", []>;
def ATOMXOR6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                    (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMXOR6432 PSEUDO!", []>;
def ATOMNAND6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                     (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                     "#ATOMNAND6432 PSEUDO!", []>;
def ATOMADD6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                    (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMADD6432 PSEUDO!", []>;
def ATOMSUB6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                    (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                    "#ATOMSUB6432 PSEUDO!", []>;
def ATOMSWAP6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                     (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                     "#ATOMSWAP6432 PSEUDO!", []>;
}

//===----------------------------------------------------------------------===//
// Normal-Instructions-With-Lock-Prefix Pseudo Instructions
//===----------------------------------------------------------------------===//

// FIXME: Use normal instructions and add lock prefix dynamically.

// Memory barriers

// TODO: Get this to fold the constant into the instruction.
def OR32mrLocked  : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$zero),
                      "lock\n\t"
                      "or{l}\t{$zero, $dst|$dst, $zero}",
                      []>, Requires<[In32BitMode]>, LOCK;
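// A locked "or" of zero into a stack slot acts as a full memory barrier on
// CPUs that lack MFENCE; the register operand merely supplies the zero.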

let hasSideEffects = 1 in
def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
                       "#MEMBARRIER",
                       [(X86MemBarrier)]>, Requires<[HasSSE2]>;

// TODO: Get this to fold the constant into the instruction.
let hasSideEffects = 1, Defs = [ESP] in
def Int_MemBarrierNoSSE64  : RI<0x09, MRM1r, (outs), (ins GR64:$zero),
                                "lock\n\t"
                                "or{q}\t{$zero, (%rsp)|(%rsp), $zero}",
                                [(X86MemBarrierNoSSE GR64:$zero)]>,
                                Requires<[In64BitMode]>, LOCK;


// Optimized codegen when the non-memory output is not used.
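// For example, an atomic fetch-and-add whose result is unused can be
// selected as a single "lock add" instead of an xadd or cmpxchg loop.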
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1 in {
def LOCK_ADD8mr  : I<0x00, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
                     "lock\n\t"
                     "add{b}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD16mr : I<0x01, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                     "lock\n\t"
                     "add{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
def LOCK_ADD32mr : I<0x01, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                     "lock\n\t"
                     "add{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                      "lock\n\t"
                      "add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;

def LOCK_ADD8mi   : Ii8<0x80, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src2),
                        "lock\n\t"
                        "add{b}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD16mi  : Ii16<0x81, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src2),
                         "lock\n\t"
                         "add{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
def LOCK_ADD32mi  : Ii32<0x81, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src2),
                         "lock\n\t"
                         "add{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD64mi32 : RIi32<0x81, MRM0m, (outs),
                           (ins i64mem:$dst, i64i32imm :$src2),
                           "lock\n\t"
                           "add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;

def LOCK_ADD16mi8 : Ii8<0x83, MRM0m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
                        "lock\n\t"
                        "add{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
def LOCK_ADD32mi8 : Ii8<0x83, MRM0m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
                        "lock\n\t"
                        "add{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD64mi8 : RIi8<0x83, MRM0m, (outs),
                         (ins i64mem:$dst, i64i8imm :$src2),
                         "lock\n\t"
                         "add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;

def LOCK_SUB8mr   : I<0x28, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src2),
                      "lock\n\t"
                      "sub{b}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB16mr  : I<0x29, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                      "lock\n\t"
                      "sub{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
def LOCK_SUB32mr  : I<0x29, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                      "lock\n\t"
                      "sub{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB64mr  : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                       "lock\n\t"
                       "sub{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;


def LOCK_SUB8mi   : Ii8<0x80, MRM5m, (outs), (ins i8mem :$dst, i8imm:$src2),
                        "lock\n\t"
                        "sub{b}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB16mi  : Ii16<0x81, MRM5m, (outs), (ins i16mem:$dst, i16imm:$src2),
                         "lock\n\t"
                         "sub{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
def LOCK_SUB32mi  : Ii32<0x81, MRM5m, (outs), (ins i32mem:$dst, i32imm:$src2),
                         "lock\n\t"
                         "sub{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB64mi32 : RIi32<0x81, MRM5m, (outs),
                           (ins i64mem:$dst, i64i32imm:$src2),
                           "lock\n\t"
                           "sub{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;


def LOCK_SUB16mi8 : Ii8<0x83, MRM5m, (outs), (ins i16mem:$dst, i16i8imm :$src2),
                        "lock\n\t"
                        "sub{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK;
def LOCK_SUB32mi8 : Ii8<0x83, MRM5m, (outs), (ins i32mem:$dst, i32i8imm :$src2),
                        "lock\n\t"
                        "sub{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_SUB64mi8 : RIi8<0x83, MRM5m, (outs),
                         (ins i64mem:$dst, i64i8imm :$src2),
                         "lock\n\t"
                         "sub{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;

def LOCK_INC8m  : I<0xFE, MRM0m, (outs), (ins i8mem :$dst),
                    "lock\n\t"
                    "inc{b}\t$dst", []>, LOCK;
def LOCK_INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst),
                    "lock\n\t"
                    "inc{w}\t$dst", []>, OpSize, LOCK;
def LOCK_INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst),
                    "lock\n\t"
                    "inc{l}\t$dst", []>, LOCK;
def LOCK_INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst),
                     "lock\n\t"
                     "inc{q}\t$dst", []>, LOCK;

def LOCK_DEC8m  : I<0xFE, MRM1m, (outs), (ins i8mem :$dst),
                    "lock\n\t"
                    "dec{b}\t$dst", []>, LOCK;
def LOCK_DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst),
                    "lock\n\t"
                    "dec{w}\t$dst", []>, OpSize, LOCK;
def LOCK_DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst),
                    "lock\n\t"
                    "dec{l}\t$dst", []>, LOCK;
def LOCK_DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst),
                     "lock\n\t"
                     "dec{q}\t$dst", []>, LOCK;
}

// Atomic compare and swap.
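// cmpxchg8b compares EDX:EAX against the 64-bit memory operand; if they are
// equal it stores ECX:EBX there, otherwise it loads the memory value into
// EDX:EAX.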
let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX] in {
def LCMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$ptr),
                   "lock\n\t"
                   "cmpxchg8b\t$ptr",
                   [(X86cas8 addr:$ptr)]>, TB, LOCK;
}
let Defs = [AL, EFLAGS], Uses = [AL] in {
def LCMPXCHG8 : I<0xB0, MRMDestMem, (outs), (ins i8mem:$ptr, GR8:$swap),
                  "lock\n\t"
                  "cmpxchg{b}\t{$swap, $ptr|$ptr, $swap}",
                  [(X86cas addr:$ptr, GR8:$swap, 1)]>, TB, LOCK;
}

let Defs = [AX, EFLAGS], Uses = [AX] in {
def LCMPXCHG16 : I<0xB1, MRMDestMem, (outs), (ins i16mem:$ptr, GR16:$swap),
                   "lock\n\t"
                   "cmpxchg{w}\t{$swap, $ptr|$ptr, $swap}",
                   [(X86cas addr:$ptr, GR16:$swap, 2)]>, TB, OpSize, LOCK;
}

let Defs = [EAX, EFLAGS], Uses = [EAX] in {
def LCMPXCHG32 : I<0xB1, MRMDestMem, (outs), (ins i32mem:$ptr, GR32:$swap),
                   "lock\n\t"
                   "cmpxchg{l}\t{$swap, $ptr|$ptr, $swap}",
                   [(X86cas addr:$ptr, GR32:$swap, 4)]>, TB, LOCK;
}

let Defs = [RAX, EFLAGS], Uses = [RAX] in {
def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap),
                    "lock\n\t"
                    "cmpxchgq\t$swap,$ptr",
                    [(X86cas addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
}

// Atomic exchange and add
let Constraints = "$val = $dst", Defs = [EFLAGS] in {
def LXADD8  : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins GR8:$val, i8mem:$ptr),
                "lock\n\t"
                "xadd{b}\t{$val, $ptr|$ptr, $val}",
                [(set GR8:$dst, (atomic_load_add_8 addr:$ptr, GR8:$val))]>,
                TB, LOCK;
def LXADD16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins GR16:$val, i16mem:$ptr),
                "lock\n\t"
                "xadd{w}\t{$val, $ptr|$ptr, $val}",
                [(set GR16:$dst, (atomic_load_add_16 addr:$ptr, GR16:$val))]>,
                TB, OpSize, LOCK;
def LXADD32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins GR32:$val, i32mem:$ptr),
                "lock\n\t"
                "xadd{l}\t{$val, $ptr|$ptr, $val}",
                [(set GR32:$dst, (atomic_load_add_32 addr:$ptr, GR32:$val))]>,
                TB, LOCK;
def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins GR64:$val,i64mem:$ptr),
                 "lock\n\t"
                 "xadd\t$val, $ptr",
                 [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>,
                 TB, LOCK;
}

//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions.
//===----------------------------------------------------------------------===//


// CMOV* - Used to implement the SSE SELECT DAG operation.  Expanded after
// instruction selection into a branch sequence.
let Uses = [EFLAGS], usesCustomInserter = 1 in {
  def CMOV_FR32 : I<0, Pseudo,
                    (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
                    "#CMOV_FR32 PSEUDO!",
                    [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_FR64 : I<0, Pseudo,
                    (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
                    "#CMOV_FR64 PSEUDO!",
                    [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_V4F32 : I<0, Pseudo,
                     (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                     "#CMOV_V4F32 PSEUDO!",
                     [(set VR128:$dst,
                       (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                       EFLAGS)))]>;
  def CMOV_V2F64 : I<0, Pseudo,
                     (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                     "#CMOV_V2F64 PSEUDO!",
                     [(set VR128:$dst,
                       (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                       EFLAGS)))]>;
  def CMOV_V2I64 : I<0, Pseudo,
                     (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                     "#CMOV_V2I64 PSEUDO!",
                     [(set VR128:$dst,
                       (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                       EFLAGS)))]>;
}


//===----------------------------------------------------------------------===//
// DAG Pattern Matching Rules
//===----------------------------------------------------------------------===//

// ConstantPool, GlobalAddress, ExternalSymbol, and JumpTable
def : Pat<(i32 (X86Wrapper tconstpool  :$dst)), (MOV32ri tconstpool  :$dst)>;
def : Pat<(i32 (X86Wrapper tjumptable  :$dst)), (MOV32ri tjumptable  :$dst)>;
def : Pat<(i32 (X86Wrapper tglobaltlsaddr:$dst)),(MOV32ri tglobaltlsaddr:$dst)>;
def : Pat<(i32 (X86Wrapper tglobaladdr :$dst)), (MOV32ri tglobaladdr :$dst)>;
def : Pat<(i32 (X86Wrapper texternalsym:$dst)), (MOV32ri texternalsym:$dst)>;
def : Pat<(i32 (X86Wrapper tblockaddress:$dst)), (MOV32ri tblockaddress:$dst)>;

def : Pat<(add GR32:$src1, (X86Wrapper tconstpool:$src2)),
          (ADD32ri GR32:$src1, tconstpool:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tjumptable:$src2)),
          (ADD32ri GR32:$src1, tjumptable:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tglobaladdr :$src2)),
          (ADD32ri GR32:$src1, tglobaladdr:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper texternalsym:$src2)),
          (ADD32ri GR32:$src1, texternalsym:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tblockaddress:$src2)),
          (ADD32ri GR32:$src1, tblockaddress:$src2)>;

def : Pat<(store (i32 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV32mi addr:$dst, tglobaladdr:$src)>;
def : Pat<(store (i32 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV32mi addr:$dst, texternalsym:$src)>;
def : Pat<(store (i32 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV32mi addr:$dst, tblockaddress:$src)>;



// ConstantPool, GlobalAddress, ExternalSymbol, and JumpTable references,
// when not in the small code model, should use 'movabs'.  FIXME: This is
// really a hack, the 'movabs' predicate should handle this sort of thing.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri tconstpool  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri tjumptable  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri tglobaladdr :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri texternalsym:$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri tblockaddress:$dst)>, Requires<[FarData]>;

// In static codegen with the small code model, we can get the address of a
// label into a register with 'movl'.  FIXME: This is a hack, the 'imm'
// predicate of MOV64ri64i32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri64i32 tconstpool  :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri64i32 tjumptable  :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri64i32 tglobaladdr :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri64i32 texternalsym:$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri64i32 tblockaddress:$dst)>, Requires<[SmallCode]>;

// In the kernel code model, we can get the address of a label into a
// register with 'movq'.  FIXME: This is a hack, the 'imm' predicate of
// MOV64ri32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri32 tconstpool  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri32 tjumptable  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;

// If we have the small code model and -static mode, it is safe to store
// global addresses directly as immediates.  FIXME: This is really a hack,
// the 'imm' predicate for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tblockaddress:$src)>,
          Requires<[NearData, IsStatic]>;



// Calls

// TLS has some funny stuff here...
// This corresponds to movabs $foo@tpoff, %rax
def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
          (MOV64ri tglobaltlsaddr :$dst)>;
// This corresponds to add $foo@tpoff, %rax
def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
          (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;
// This corresponds to mov foo@tpoff(%rbx), %eax
def : Pat<(load (i64 (X86Wrapper tglobaltlsaddr :$dst))),
          (MOV64rm tglobaltlsaddr :$dst)>;


// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>, Requires<[NotWin64]>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>, Requires<[NotWin64]>;

def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (WINCALL64pcrel32 tglobaladdr:$dst)>, Requires<[IsWin64]>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (WINCALL64pcrel32 texternalsym:$dst)>, Requires<[IsWin64]>;

// tailcall stuff
def : Pat<(X86tcret GR32_TC:$dst, imm:$off),
          (TCRETURNri GR32_TC:$dst, imm:$off)>,
          Requires<[In32BitMode]>;

// FIXME: This is disabled for 32-bit PIC mode because the global base
// register which is part of the address mode may be assigned a
// callee-saved register.
def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi addr:$dst, imm:$off)>,
          Requires<[In32BitMode, IsNotPIC]>;

def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi tglobaladdr:$dst, imm:$off)>,
          Requires<[In32BitMode]>;

def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
          (TCRETURNdi texternalsym:$dst, imm:$off)>,
          Requires<[In32BitMode]>;

def : Pat<(X86tcret GR64_TC:$dst, imm:$off),
          (TCRETURNri64 GR64_TC:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi64 addr:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
          (TCRETURNdi64 texternalsym:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

// Normal calls, with various flavors of addresses.
def : Pat<(X86call (i32 tglobaladdr:$dst)),
          (CALLpcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i32 texternalsym:$dst)),
          (CALLpcrel32 texternalsym:$dst)>;
def : Pat<(X86call (i32 imm:$dst)),
          (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;

// X86 specific add which produces a flag.
def : Pat<(addc GR32:$src1, GR32:$src2),
          (ADD32rr GR32:$src1, GR32:$src2)>;
def : Pat<(addc GR32:$src1, (load addr:$src2)),
          (ADD32rm GR32:$src1, addr:$src2)>;
def : Pat<(addc GR32:$src1, imm:$src2),
          (ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(addc GR32:$src1, i32immSExt8:$src2),
          (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;

def : Pat<(addc GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(addc GR64:$src1, (load addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, imm:$src2)>;

def : Pat<(subc GR32:$src1, GR32:$src2),
          (SUB32rr GR32:$src1, GR32:$src2)>;
def : Pat<(subc GR32:$src1, (load addr:$src2)),
          (SUB32rm GR32:$src1, addr:$src2)>;
def : Pat<(subc GR32:$src1, imm:$src2),
          (SUB32ri GR32:$src1, imm:$src2)>;
def : Pat<(subc GR32:$src1, i32immSExt8:$src2),
          (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;

def : Pat<(subc GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(subc GR64:$src1, (load addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(subc GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(subc GR64:$src1, imm:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Comparisons.

// TEST R,R is smaller than CMP R,0
def : Pat<(X86cmp GR8:$src1, 0),
          (TEST8rr GR8:$src1, GR8:$src1)>;
def : Pat<(X86cmp GR16:$src1, 0),
          (TEST16rr GR16:$src1, GR16:$src1)>;
def : Pat<(X86cmp GR32:$src1, 0),
          (TEST32rr GR32:$src1, GR32:$src1)>;
def : Pat<(X86cmp GR64:$src1, 0),
          (TEST64rr GR64:$src1, GR64:$src1)>;

// Conditional moves with folded loads with operands swapped and conditions
// inverted.
multiclass CMOVmr<PatLeaf InvertedCond, Instruction Inst16, Instruction Inst32,
                  Instruction Inst64> {
  def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, InvertedCond, EFLAGS),
            (Inst16 GR16:$src2, addr:$src1)>;
  def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, InvertedCond, EFLAGS),
            (Inst32 GR32:$src2, addr:$src1)>;
  def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, InvertedCond, EFLAGS),
            (Inst64 GR64:$src2, addr:$src1)>;
}

defm : CMOVmr<X86_COND_B , CMOVAE16rm, CMOVAE32rm, CMOVAE64rm>;
defm : CMOVmr<X86_COND_AE, CMOVB16rm , CMOVB32rm , CMOVB64rm>;
defm : CMOVmr<X86_COND_E , CMOVNE16rm, CMOVNE32rm, CMOVNE64rm>;
defm : CMOVmr<X86_COND_NE, CMOVE16rm , CMOVE32rm , CMOVE64rm>;
defm : CMOVmr<X86_COND_BE, CMOVA16rm , CMOVA32rm , CMOVA64rm>;
defm : CMOVmr<X86_COND_A , CMOVBE16rm, CMOVBE32rm, CMOVBE64rm>;
defm : CMOVmr<X86_COND_L , CMOVGE16rm, CMOVGE32rm, CMOVGE64rm>;
defm : CMOVmr<X86_COND_GE, CMOVL16rm , CMOVL32rm , CMOVL64rm>;
defm : CMOVmr<X86_COND_LE, CMOVG16rm , CMOVG32rm , CMOVG64rm>;
defm : CMOVmr<X86_COND_G , CMOVLE16rm, CMOVLE32rm, CMOVLE64rm>;
defm : CMOVmr<X86_COND_P , CMOVNP16rm, CMOVNP32rm, CMOVNP64rm>;
defm : CMOVmr<X86_COND_NP, CMOVP16rm , CMOVP32rm , CMOVP64rm>;
defm : CMOVmr<X86_COND_S , CMOVNS16rm, CMOVNS32rm, CMOVNS64rm>;
defm : CMOVmr<X86_COND_NS, CMOVS16rm , CMOVS32rm , CMOVS64rm>;
defm : CMOVmr<X86_COND_O , CMOVNO16rm, CMOVNO32rm, CMOVNO64rm>;
defm : CMOVmr<X86_COND_NO, CMOVO16rm , CMOVO32rm , CMOVO64rm>;

// zextload bool -> zextload byte
def : Pat<(zextloadi8i1  addr:$src), (MOV8rm     addr:$src)>;
def : Pat<(zextloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;

// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.

def : Pat<(extloadi8i1 addr:$src),   (MOV8rm      addr:$src)>;
def : Pat<(extloadi16i1 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i1 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi16i8 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i8 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;

def : Pat<(extloadi64i1 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i8 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
// For other extloads, use subregs, since the high contents of the register
// are defined after an extload.
def : Pat<(extloadi64i32 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src),
                         sub_32bit)>;

// anyext.  Define these to do an explicit zero-extend to
// avoid partial-register updates.
def : Pat<(i16 (anyext GR8 :$src)), (MOVZX16rr8  GR8 :$src)>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8  GR8 :$src)>;

// Except for i16 -> i32 since isel expects i16 ops to be promoted to i32.
def : Pat<(i32 (anyext GR16:$src)),
          (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;

def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8  GR8  :$src)>;
def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16 :$src)>;
def : Pat<(i64 (anyext GR32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;


// Any instruction that defines a 32-bit result zeroes the high half of the
// register, with a few exceptions: Truncate can be lowered to EXTRACT_SUBREG;
// CopyFromReg may be copying from a truncate; and x86's cmov doesn't do
// anything if the condition is false.  Any other 32-bit operation will
// zero-extend up to 64 bits.
def def32 : PatLeaf<(i32 GR32:$src), [{
  return N->getOpcode() != ISD::TRUNCATE &&
         N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg &&
         N->getOpcode() != X86ISD::CMOV;
}]>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
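// For example, (zext (add GR32:$a, GR32:$b)) needs no instruction for the
// extension: the 32-bit add already zeroed bits 63:32, so a SUBREG_TO_REG
// suffices.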

//===----------------------------------------------------------------------===//
// Pattern match OR as ADD
//===----------------------------------------------------------------------===//

// If safe, we prefer to pattern match OR as ADD at isel time.  ADD can be
// 3-addressified into an LEA instruction to avoid copies.  However, we also
// want to finally emit these instructions as an 'or' at the end of the code
// generator to make the generated code easier to read.  To do this, we select
// into "disjoint bits" pseudo ops.

// Treat an 'or' node as an 'add' node if the or'ed bits are known to be zero.
def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());

  unsigned BitWidth = N->getValueType(0).getScalarType().getSizeInBits();
  APInt Mask = APInt::getAllOnesValue(BitWidth);
  APInt KnownZero0, KnownOne0;
  CurDAG->ComputeMaskedBits(N->getOperand(0), Mask, KnownZero0, KnownOne0, 0);
  APInt KnownZero1, KnownOne1;
  CurDAG->ComputeMaskedBits(N->getOperand(1), Mask, KnownZero1, KnownOne1, 0);
  return (~KnownZero0 & ~KnownZero1) == 0;
}]>;
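// For example, in (or (shl x, 4), 7) the low four bits of the shifted value
// are known to be zero, so the 'or' behaves exactly like an 'add' and can be
// selected as one of the _DB pseudos below.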


// (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.
let AddedComplexity = 5 in { // Try this before selecting as an OR.

let isCommutable = 1, isConvertibleToThreeAddress = 1,
    Constraints = "$src1 = $dst", Defs = [EFLAGS] in {
def ADD16rr_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                    "", // orw/addw REG, REG
                    [(set GR16:$dst, (or_is_add GR16:$src1, GR16:$src2))]>;
def ADD32rr_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                    "", // orl/addl REG, REG
                    [(set GR32:$dst, (or_is_add GR32:$src1, GR32:$src2))]>;
def ADD64rr_DB  : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "", // orq/addq REG, REG
                    [(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;


def ADD16ri_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
                    "", // orw/addw REG, imm
                    [(set GR16:$dst, (or_is_add GR16:$src1, imm:$src2))]>;
def ADD32ri_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
                    "", // orl/addl REG, imm
                    [(set GR32:$dst, (or_is_add GR32:$src1, imm:$src2))]>;
def ADD64ri32_DB : I<0, Pseudo,
                     (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                     "", // orq/addq REG, imm
                     [(set GR64:$dst, (or_is_add GR64:$src1,
                                                 i64immSExt32:$src2))]>;

def ADD16ri8_DB : I<0, Pseudo,
                    (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
                    "", // orw/addw REG, imm8
                    [(set GR16:$dst,(or_is_add GR16:$src1,i16immSExt8:$src2))]>;
def ADD32ri8_DB : I<0, Pseudo,
                    (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
                    "", // orl/addl REG, imm8
                    [(set GR32:$dst,(or_is_add GR32:$src1,i32immSExt8:$src2))]>;
def ADD64ri8_DB : I<0, Pseudo,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "", // orq/addq REG, imm8
                    [(set GR64:$dst, (or_is_add GR64:$src1,
                                                i64immSExt8:$src2))]>;
}
} // AddedComplexity


//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//

// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
def : Pat<(add GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
          (SUB16mi8 addr:$dst, -128)>;

def : Pat<(add GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
          (SUB32mi8 addr:$dst, -128)>;

def : Pat<(add GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi8 addr:$dst, -128)>;
| 1106 | |
| 1107 | // The same trick applies for 32-bit immediate fields in 64-bit |
| 1108 | // instructions. |
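|      | // +2^31 is not representable as a sign-extended 32-bit immediate, but -2^31
|      | // is (0xffffffff80000000), so again the add is rewritten as a sub.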
| 1109 | def : Pat<(add GR64:$src1, 0x0000000080000000), |
| 1110 | (SUB64ri32 GR64:$src1, 0xffffffff80000000)>; |
| 1111 | def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
| 1112 | (SUB64mi32 addr:$dst, 0xffffffff80000000)>; |
| 1113 | |
| 1114 | // To avoid needing to materialize an immediate in a register, use a 32-bit and
| 1115 | // with implicit zero-extension instead of a 64-bit and if the immediate has at
| 1116 | // least 32 bits of leading zeros. If, in addition, the low 32 bits can be
| 1117 | // represented as the sign extension of an 8-bit constant, use that form.
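|      | // For example, (and GR64:$r, 0x7f) becomes "andl $127, %reg32": the 32-bit
|      | // operation implicitly zeroes bits 63..32, and 127 fits in a sign-extended
|      | // 8-bit immediate, so no REX.W prefix or 32-bit immediate is needed.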
| 1118 | |
| 1119 | def : Pat<(and GR64:$src, i64immZExt32SExt8:$imm), |
| 1120 | (SUBREG_TO_REG |
| 1121 | (i64 0), |
| 1122 | (AND32ri8 |
| 1123 | (EXTRACT_SUBREG GR64:$src, sub_32bit), |
| 1124 | (i32 (GetLo8XForm imm:$imm))), |
| 1125 | sub_32bit)>; |
| 1126 | |
| 1127 | def : Pat<(and GR64:$src, i64immZExt32:$imm),
| 1128 | (SUBREG_TO_REG |
| 1129 | (i64 0), |
| 1130 | (AND32ri |
| 1131 | (EXTRACT_SUBREG GR64:$src, sub_32bit), |
| 1132 | (i32 (GetLo32XForm imm:$imm))), |
| 1133 | sub_32bit)>; |
| 1134 | |
| 1135 | |
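|      | // An "and" with a mask covering exactly the low 8/16/32 bits is better
|      | // selected as a MOVZX of the corresponding sub-register: it needs no
|      | // immediate operand and, unlike AND, leaves EFLAGS untouched.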
| 1136 | // r & (2^16-1) ==> movz |
| 1137 | def : Pat<(and GR32:$src1, 0xffff), |
| 1138 | (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>; |
| 1139 | // r & (2^8-1) ==> movz |
| 1140 | def : Pat<(and GR32:$src1, 0xff), |
| 1141 | (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src1, |
| 1142 | GR32_ABCD)), |
| 1143 | sub_8bit))>, |
| 1144 | Requires<[In32BitMode]>; |
| 1145 | // r & (2^8-1) ==> movz |
| 1146 | def : Pat<(and GR16:$src1, 0xff), |
| 1147 | (MOVZX16rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src1, |
| 1148 | GR16_ABCD)), |
| 1149 | sub_8bit))>, |
| 1150 | Requires<[In32BitMode]>; |
| 1151 | |
| 1152 | // r & (2^32-1) ==> movz |
| 1153 | def : Pat<(and GR64:$src, 0x00000000FFFFFFFF), |
| 1154 | (MOVZX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>; |
| 1155 | // r & (2^16-1) ==> movz |
| 1156 | def : Pat<(and GR64:$src, 0xffff), |
| 1157 | (MOVZX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit)))>; |
| 1158 | // r & (2^8-1) ==> movz |
| 1159 | def : Pat<(and GR64:$src, 0xff), |
| 1160 | (MOVZX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit)))>; |
| 1161 | // r & (2^8-1) ==> movz |
| 1162 | def : Pat<(and GR32:$src1, 0xff), |
| 1163 | (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>, |
| 1164 | Requires<[In64BitMode]>; |
| 1165 | // r & (2^8-1) ==> movz |
| 1166 | def : Pat<(and GR16:$src1, 0xff), |
| 1167 | (MOVZX16rr8 (i8 (EXTRACT_SUBREG GR16:$src1, sub_8bit)))>, |
| 1168 | Requires<[In64BitMode]>; |
| 1169 | |
| 1170 | |
| 1171 | // sext_inreg patterns |
| 1172 | def : Pat<(sext_inreg GR32:$src, i16), |
| 1173 | (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>; |
| 1174 | def : Pat<(sext_inreg GR32:$src, i8), |
| 1175 | (MOVSX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, |
| 1176 | GR32_ABCD)), |
| 1177 | sub_8bit))>, |
| 1178 | Requires<[In32BitMode]>; |
| 1179 | def : Pat<(sext_inreg GR16:$src, i8), |
| 1180 | (MOVSX16rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, |
| 1181 | GR16_ABCD)), |
| 1182 | sub_8bit))>, |
| 1183 | Requires<[In32BitMode]>; |
| 1184 | |
| 1185 | def : Pat<(sext_inreg GR64:$src, i32), |
| 1186 | (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>; |
| 1187 | def : Pat<(sext_inreg GR64:$src, i16), |
| 1188 | (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>; |
| 1189 | def : Pat<(sext_inreg GR64:$src, i8), |
| 1190 | (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>; |
| 1191 | def : Pat<(sext_inreg GR32:$src, i8), |
| 1192 | (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>, |
| 1193 | Requires<[In64BitMode]>; |
| 1194 | def : Pat<(sext_inreg GR16:$src, i8), |
| 1195 | (MOVSX16rr8 (i8 (EXTRACT_SUBREG GR16:$src, sub_8bit)))>, |
| 1196 | Requires<[In64BitMode]>; |
| 1197 | |
| 1198 | |
| 1199 | // trunc patterns |
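|      | // In 32-bit mode only EAX/EBX/ECX/EDX have an addressable low-byte
|      | // sub-register, so i8 truncates first constrain the source to the ABCD
|      | // register classes; in 64-bit mode (with REX) every GR16/GR32 register
|      | // has one.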
| 1200 | def : Pat<(i16 (trunc GR32:$src)), |
| 1201 | (EXTRACT_SUBREG GR32:$src, sub_16bit)>; |
| 1202 | def : Pat<(i8 (trunc GR32:$src)), |
| 1203 | (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)), |
| 1204 | sub_8bit)>, |
| 1205 | Requires<[In32BitMode]>; |
| 1206 | def : Pat<(i8 (trunc GR16:$src)), |
| 1207 | (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)), |
| 1208 | sub_8bit)>, |
| 1209 | Requires<[In32BitMode]>; |
| 1210 | def : Pat<(i32 (trunc GR64:$src)), |
| 1211 | (EXTRACT_SUBREG GR64:$src, sub_32bit)>; |
| 1212 | def : Pat<(i16 (trunc GR64:$src)), |
| 1213 | (EXTRACT_SUBREG GR64:$src, sub_16bit)>; |
| 1214 | def : Pat<(i8 (trunc GR64:$src)), |
| 1215 | (EXTRACT_SUBREG GR64:$src, sub_8bit)>; |
| 1216 | def : Pat<(i8 (trunc GR32:$src)), |
| 1217 | (EXTRACT_SUBREG GR32:$src, sub_8bit)>, |
| 1218 | Requires<[In64BitMode]>; |
| 1219 | def : Pat<(i8 (trunc GR16:$src)), |
| 1220 | (EXTRACT_SUBREG GR16:$src, sub_8bit)>, |
| 1221 | Requires<[In64BitMode]>; |
| 1222 | |
| 1223 | // h-register tricks |
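|      | // A shift-right-by-8 of a value in EAX..EDX (followed by a truncate or
|      | // zero-extend) can be done by reading the high-byte register AH/BH/CH/DH
|      | // directly; COPY_TO_REGCLASS constrains the source to the ABCD classes,
|      | // the only ones that provide a sub_8bit_hi sub-register.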
| 1224 | def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))), |
| 1225 | (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)), |
| 1226 | sub_8bit_hi)>, |
| 1227 | Requires<[In32BitMode]>; |
| 1228 | def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))), |
| 1229 | (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)), |
| 1230 | sub_8bit_hi)>, |
| 1231 | Requires<[In32BitMode]>; |
| 1232 | def : Pat<(srl GR16:$src, (i8 8)), |
| 1233 | (EXTRACT_SUBREG |
| 1234 | (MOVZX32rr8 |
| 1235 | (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)), |
| 1236 | sub_8bit_hi)), |
| 1237 | sub_16bit)>, |
| 1238 | Requires<[In32BitMode]>; |
| 1239 | def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))), |
| 1240 | (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, |
| 1241 | GR16_ABCD)), |
| 1242 | sub_8bit_hi))>, |
| 1243 | Requires<[In32BitMode]>; |
| 1244 | def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))), |
| 1245 | (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, |
| 1246 | GR16_ABCD)), |
| 1247 | sub_8bit_hi))>, |
| 1248 | Requires<[In32BitMode]>; |
| 1249 | def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)), |
| 1250 | (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, |
| 1251 | GR32_ABCD)), |
| 1252 | sub_8bit_hi))>, |
| 1253 | Requires<[In32BitMode]>; |
| 1254 | def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)), |
| 1255 | (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, |
| 1256 | GR32_ABCD)), |
| 1257 | sub_8bit_hi))>, |
| 1258 | Requires<[In32BitMode]>; |
| 1259 | |
| 1260 | // h-register tricks on x86-64.
| 1261 | // For now, be conservative on x86-64 and use an h-register extract only if the |
| 1262 | // value is immediately zero-extended or stored, which are somewhat common |
| 1263 | // cases. This uses a bunch of code to prevent a register requiring a REX prefix |
| 1264 | // from being allocated in the same instruction as the h register, as there's |
| 1265 | // currently no way to describe this requirement to the register allocator. |
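|      | // For example, "movzbl %ah, %ecx" is encodable but "movzbl %ah, %r8d" is
|      | // not: an instruction carrying a REX prefix cannot address AH/BH/CH/DH,
|      | // hence the _NOREX instruction variants below.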
| 1266 | |
| 1267 | // h-register extract and zero-extend. |
| 1268 | def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)), |
| 1269 | (SUBREG_TO_REG |
| 1270 | (i64 0), |
| 1271 | (MOVZX32_NOREXrr8 |
| 1272 | (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)), |
| 1273 | sub_8bit_hi)), |
| 1274 | sub_32bit)>; |
| 1275 | def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)), |
| 1276 | (MOVZX32_NOREXrr8 |
| 1277 | (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)), |
| 1278 | sub_8bit_hi))>, |
| 1279 | Requires<[In64BitMode]>; |
| 1280 | def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)), |
| 1281 | (MOVZX32_NOREXrr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, |
| 1282 | GR32_ABCD)), |
| 1283 | sub_8bit_hi))>, |
| 1284 | Requires<[In64BitMode]>; |
| 1285 | def : Pat<(srl GR16:$src, (i8 8)), |
| 1286 | (EXTRACT_SUBREG |
| 1287 | (MOVZX32_NOREXrr8 |
| 1288 | (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)), |
| 1289 | sub_8bit_hi)), |
| 1290 | sub_16bit)>, |
| 1291 | Requires<[In64BitMode]>; |
| 1292 | def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))), |
| 1293 | (MOVZX32_NOREXrr8 |
| 1294 | (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)), |
| 1295 | sub_8bit_hi))>, |
| 1296 | Requires<[In64BitMode]>; |
| 1297 | def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))), |
| 1298 | (MOVZX32_NOREXrr8 |
| 1299 | (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)), |
| 1300 | sub_8bit_hi))>, |
| 1301 | Requires<[In64BitMode]>; |
| 1302 | def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))), |
| 1303 | (SUBREG_TO_REG |
| 1304 | (i64 0), |
| 1305 | (MOVZX32_NOREXrr8 |
| 1306 | (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)), |
| 1307 | sub_8bit_hi)), |
| 1308 | sub_32bit)>; |
| 1309 | def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))), |
| 1310 | (SUBREG_TO_REG |
| 1311 | (i64 0), |
| 1312 | (MOVZX32_NOREXrr8 |
| 1313 | (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)), |
| 1314 | sub_8bit_hi)), |
| 1315 | sub_32bit)>; |
| 1316 | |
| 1317 | // h-register extract and store. |
| 1318 | def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst), |
| 1319 | (MOV8mr_NOREX |
| 1320 | addr:$dst, |
| 1321 | (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)), |
| 1322 | sub_8bit_hi))>; |
| 1323 | def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst), |
| 1324 | (MOV8mr_NOREX |
| 1325 | addr:$dst, |
| 1326 | (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)), |
| 1327 | sub_8bit_hi))>, |
| 1328 | Requires<[In64BitMode]>; |
| 1329 | def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst), |
| 1330 | (MOV8mr_NOREX |
| 1331 | addr:$dst, |
| 1332 | (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)), |
| 1333 | sub_8bit_hi))>, |
| 1334 | Requires<[In64BitMode]>; |
| 1335 | |
| 1336 | |
| 1337 | // (shl x, 1) ==> (add x, x) |
| 1338 | def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr GR8 :$src1, GR8 :$src1)>; |
| 1339 | def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>; |
| 1340 | def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>; |
| 1341 | def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>; |
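|      | // The ADD form is presumably preferred because ADD reg,reg is commutable,
|      | // and the 16/32/64-bit forms can be converted to a three-address LEA,
|      | // giving the register allocator more freedom.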
| 1342 | |
| 1343 | // (shl x (and y, 31)) ==> (shl x, y) |
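|      | // The hardware masks the CL shift count to 5 bits for 8/16/32-bit shifts
|      | // (and to 6 bits for 64-bit shifts, below), so an explicit mask of the
|      | // count is redundant and can be dropped during selection.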
| 1344 | def : Pat<(shl GR8:$src1, (and CL, 31)), |
| 1345 | (SHL8rCL GR8:$src1)>; |
| 1346 | def : Pat<(shl GR16:$src1, (and CL, 31)), |
| 1347 | (SHL16rCL GR16:$src1)>; |
| 1348 | def : Pat<(shl GR32:$src1, (and CL, 31)), |
| 1349 | (SHL32rCL GR32:$src1)>; |
| 1350 | def : Pat<(store (shl (loadi8 addr:$dst), (and CL, 31)), addr:$dst), |
| 1351 | (SHL8mCL addr:$dst)>; |
| 1352 | def : Pat<(store (shl (loadi16 addr:$dst), (and CL, 31)), addr:$dst), |
| 1353 | (SHL16mCL addr:$dst)>; |
| 1354 | def : Pat<(store (shl (loadi32 addr:$dst), (and CL, 31)), addr:$dst), |
| 1355 | (SHL32mCL addr:$dst)>; |
| 1356 | |
| 1357 | def : Pat<(srl GR8:$src1, (and CL, 31)), |
| 1358 | (SHR8rCL GR8:$src1)>; |
| 1359 | def : Pat<(srl GR16:$src1, (and CL, 31)), |
| 1360 | (SHR16rCL GR16:$src1)>; |
| 1361 | def : Pat<(srl GR32:$src1, (and CL, 31)), |
| 1362 | (SHR32rCL GR32:$src1)>; |
| 1363 | def : Pat<(store (srl (loadi8 addr:$dst), (and CL, 31)), addr:$dst), |
| 1364 | (SHR8mCL addr:$dst)>; |
| 1365 | def : Pat<(store (srl (loadi16 addr:$dst), (and CL, 31)), addr:$dst), |
| 1366 | (SHR16mCL addr:$dst)>; |
| 1367 | def : Pat<(store (srl (loadi32 addr:$dst), (and CL, 31)), addr:$dst), |
| 1368 | (SHR32mCL addr:$dst)>; |
| 1369 | |
| 1370 | def : Pat<(sra GR8:$src1, (and CL, 31)), |
| 1371 | (SAR8rCL GR8:$src1)>; |
| 1372 | def : Pat<(sra GR16:$src1, (and CL, 31)), |
| 1373 | (SAR16rCL GR16:$src1)>; |
| 1374 | def : Pat<(sra GR32:$src1, (and CL, 31)), |
| 1375 | (SAR32rCL GR32:$src1)>; |
| 1376 | def : Pat<(store (sra (loadi8 addr:$dst), (and CL, 31)), addr:$dst), |
| 1377 | (SAR8mCL addr:$dst)>; |
| 1378 | def : Pat<(store (sra (loadi16 addr:$dst), (and CL, 31)), addr:$dst), |
| 1379 | (SAR16mCL addr:$dst)>; |
| 1380 | def : Pat<(store (sra (loadi32 addr:$dst), (and CL, 31)), addr:$dst), |
| 1381 | (SAR32mCL addr:$dst)>; |
| 1382 | |
| 1383 | // (shl x (and y, 63)) ==> (shl x, y) |
| 1384 | def : Pat<(shl GR64:$src1, (and CL, 63)), |
| 1385 | (SHL64rCL GR64:$src1)>; |
| 1386 | def : Pat<(store (shl (loadi64 addr:$dst), (and CL, 63)), addr:$dst), |
| 1387 | (SHL64mCL addr:$dst)>; |
| 1388 | |
| 1389 | def : Pat<(srl GR64:$src1, (and CL, 63)), |
| 1390 | (SHR64rCL GR64:$src1)>; |
| 1391 | def : Pat<(store (srl (loadi64 addr:$dst), (and CL, 63)), addr:$dst), |
| 1392 | (SHR64mCL addr:$dst)>; |
| 1393 | |
| 1394 | def : Pat<(sra GR64:$src1, (and CL, 63)), |
| 1395 | (SAR64rCL GR64:$src1)>; |
| 1396 | def : Pat<(store (sra (loadi64 addr:$dst), (and CL, 63)), addr:$dst), |
| 1397 | (SAR64mCL addr:$dst)>; |
| 1398 | |
| 1399 | |
| 1400 | // (anyext (setcc_carry)) -> (setcc_carry) |
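|      | // The SETB_C pseudos expand to "sbb reg, reg", which broadcasts the carry
|      | // flag across the full register (all zeros or all ones), so the wider form
|      | // already computes the any-extended value; the extension is free.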
| 1401 | def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))), |
| 1402 | (SETB_C16r)>; |
| 1403 | def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))), |
| 1404 | (SETB_C32r)>; |
| 1405 | def : Pat<(i32 (anyext (i16 (X86setcc_c X86_COND_B, EFLAGS)))), |
| 1406 | (SETB_C32r)>; |
| 1407 | |
| 1408 |
| 1409 |
| 1410 |
| 1411 | //===----------------------------------------------------------------------===// |
| 1412 | // EFLAGS-defining Patterns |
| 1413 | //===----------------------------------------------------------------------===// |
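|      | // These patterns map plain arithmetic SDNodes onto the EFLAGS-defining x86
|      | // instructions; when nothing reads the flags, the implicit EFLAGS def is
|      | // simply dead.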
| 1414 | |
| 1415 | // add reg, reg |
| 1416 | def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr GR8 :$src1, GR8 :$src2)>; |
| 1417 | def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>; |
| 1418 | def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>; |
| 1419 | |
| 1420 | // add reg, mem |
| 1421 | def : Pat<(add GR8:$src1, (loadi8 addr:$src2)), |
| 1422 | (ADD8rm GR8:$src1, addr:$src2)>; |
| 1423 | def : Pat<(add GR16:$src1, (loadi16 addr:$src2)), |
| 1424 | (ADD16rm GR16:$src1, addr:$src2)>; |
| 1425 | def : Pat<(add GR32:$src1, (loadi32 addr:$src2)), |
| 1426 | (ADD32rm GR32:$src1, addr:$src2)>; |
| 1427 | |
| 1428 | // add reg, imm |
| 1429 | def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri GR8 :$src1, imm:$src2)>;
| 1430 | def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>; |
| 1431 | def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>; |
| 1432 | def : Pat<(add GR16:$src1, i16immSExt8:$src2), |
| 1433 | (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>; |
| 1434 | def : Pat<(add GR32:$src1, i32immSExt8:$src2), |
| 1435 | (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>; |
| 1436 | |
| 1437 | // sub reg, reg |
| 1438 | def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr GR8 :$src1, GR8 :$src2)>; |
| 1439 | def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>; |
| 1440 | def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>; |
| 1441 | |
| 1442 | // sub reg, mem |
| 1443 | def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)), |
| 1444 | (SUB8rm GR8:$src1, addr:$src2)>; |
| 1445 | def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)), |
| 1446 | (SUB16rm GR16:$src1, addr:$src2)>; |
| 1447 | def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)), |
| 1448 | (SUB32rm GR32:$src1, addr:$src2)>; |
| 1449 | |
| 1450 | // sub reg, imm |
| 1451 | def : Pat<(sub GR8:$src1, imm:$src2), |
| 1452 | (SUB8ri GR8:$src1, imm:$src2)>; |
| 1453 | def : Pat<(sub GR16:$src1, imm:$src2), |
| 1454 | (SUB16ri GR16:$src1, imm:$src2)>; |
| 1455 | def : Pat<(sub GR32:$src1, imm:$src2), |
| 1456 | (SUB32ri GR32:$src1, imm:$src2)>; |
| 1457 | def : Pat<(sub GR16:$src1, i16immSExt8:$src2), |
| 1458 | (SUB16ri8 GR16:$src1, i16immSExt8:$src2)>; |
| 1459 | def : Pat<(sub GR32:$src1, i32immSExt8:$src2), |
| 1460 | (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>; |
| 1461 | |
| 1462 | // mul reg, reg |
| 1463 | def : Pat<(mul GR16:$src1, GR16:$src2), |
| 1464 | (IMUL16rr GR16:$src1, GR16:$src2)>; |
| 1465 | def : Pat<(mul GR32:$src1, GR32:$src2), |
| 1466 | (IMUL32rr GR32:$src1, GR32:$src2)>; |
| 1467 | |
| 1468 | // mul reg, mem |
| 1469 | def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)), |
| 1470 | (IMUL16rm GR16:$src1, addr:$src2)>; |
| 1471 | def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)), |
| 1472 | (IMUL32rm GR32:$src1, addr:$src2)>; |
| 1473 | |
| 1474 | // mul reg, imm |
| 1475 | def : Pat<(mul GR16:$src1, imm:$src2), |
| 1476 | (IMUL16rri GR16:$src1, imm:$src2)>; |
| 1477 | def : Pat<(mul GR32:$src1, imm:$src2), |
| 1478 | (IMUL32rri GR32:$src1, imm:$src2)>; |
| 1479 | def : Pat<(mul GR16:$src1, i16immSExt8:$src2), |
| 1480 | (IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>; |
| 1481 | def : Pat<(mul GR32:$src1, i32immSExt8:$src2), |
| 1482 | (IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>; |
| 1483 | |
| 1484 | // reg = mul mem, imm |
| 1485 | def : Pat<(mul (loadi16 addr:$src1), imm:$src2), |
| 1486 | (IMUL16rmi addr:$src1, imm:$src2)>; |
| 1487 | def : Pat<(mul (loadi32 addr:$src1), imm:$src2), |
| 1488 | (IMUL32rmi addr:$src1, imm:$src2)>; |
| 1489 | def : Pat<(mul (loadi16 addr:$src1), i16immSExt8:$src2), |
| 1490 | (IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>; |
| 1491 | def : Pat<(mul (loadi32 addr:$src1), i32immSExt8:$src2), |
| 1492 | (IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>; |
| 1493 | |
| 1494 | // Optimize multiply by 2 with EFLAGS result. |
| 1495 | let AddedComplexity = 2 in { |
| 1496 | def : Pat<(X86smul_flag GR16:$src1, 2), (ADD16rr GR16:$src1, GR16:$src1)>; |
| 1497 | def : Pat<(X86smul_flag GR32:$src1, 2), (ADD32rr GR32:$src1, GR32:$src1)>; |
| 1498 | } |
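|      | // The OF result matches as well: signed overflow of x + x is exactly
|      | // signed overflow of 2 * x.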
| 1499 | |
| 1500 | // Patterns selecting flag-defining instructions for nodes that do not
|      | // produce flags.
| 1501 | |
| 1502 | // addition |
| 1503 | def : Pat<(add GR64:$src1, GR64:$src2), |
| 1504 | (ADD64rr GR64:$src1, GR64:$src2)>; |
| 1505 | def : Pat<(add GR64:$src1, i64immSExt8:$src2), |
| 1506 | (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>; |
| 1507 | def : Pat<(add GR64:$src1, i64immSExt32:$src2), |
| 1508 | (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>; |
| 1509 | def : Pat<(add GR64:$src1, (loadi64 addr:$src2)), |
| 1510 | (ADD64rm GR64:$src1, addr:$src2)>; |
| 1511 | |
| 1512 | // subtraction |
| 1513 | def : Pat<(sub GR64:$src1, GR64:$src2), |
| 1514 | (SUB64rr GR64:$src1, GR64:$src2)>; |
| 1515 | def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)), |
| 1516 | (SUB64rm GR64:$src1, addr:$src2)>; |
| 1517 | def : Pat<(sub GR64:$src1, i64immSExt8:$src2), |
| 1518 | (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>; |
| 1519 | def : Pat<(sub GR64:$src1, i64immSExt32:$src2), |
| 1520 | (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>; |
| 1521 | |
| 1522 | // Multiply |
| 1523 | def : Pat<(mul GR64:$src1, GR64:$src2), |
| 1524 | (IMUL64rr GR64:$src1, GR64:$src2)>; |
| 1525 | def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)), |
| 1526 | (IMUL64rm GR64:$src1, addr:$src2)>; |
| 1527 | def : Pat<(mul GR64:$src1, i64immSExt8:$src2), |
| 1528 | (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>; |
| 1529 | def : Pat<(mul GR64:$src1, i64immSExt32:$src2), |
| 1530 | (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>; |
| 1531 | def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2), |
| 1532 | (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>; |
| 1533 | def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2), |
| 1534 | (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>; |
| 1535 | |
| 1536 | // Increment reg. |
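|      | // In 64-bit mode the one-byte 0x40+r INC/DEC encodings are repurposed as
|      | // REX prefixes, so the INC64_*/DEC64_* forms (using FF /0 and FF /1) are
|      | // selected instead.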
| 1537 | def : Pat<(add GR8 :$src, 1), (INC8r GR8 :$src)>; |
| 1538 | def : Pat<(add GR16:$src, 1), (INC16r GR16:$src)>, Requires<[In32BitMode]>; |
| 1539 | def : Pat<(add GR16:$src, 1), (INC64_16r GR16:$src)>, Requires<[In64BitMode]>; |
| 1540 | def : Pat<(add GR32:$src, 1), (INC32r GR32:$src)>, Requires<[In32BitMode]>; |
| 1541 | def : Pat<(add GR32:$src, 1), (INC64_32r GR32:$src)>, Requires<[In64BitMode]>; |
| 1542 | def : Pat<(add GR64:$src, 1), (INC64r GR64:$src)>; |
| 1543 | |
| 1544 | // Decrement reg. |
| 1545 | def : Pat<(add GR8 :$src, -1), (DEC8r GR8 :$src)>; |
| 1546 | def : Pat<(add GR16:$src, -1), (DEC16r GR16:$src)>, Requires<[In32BitMode]>; |
| 1547 | def : Pat<(add GR16:$src, -1), (DEC64_16r GR16:$src)>, Requires<[In64BitMode]>; |
| 1548 | def : Pat<(add GR32:$src, -1), (DEC32r GR32:$src)>, Requires<[In32BitMode]>; |
| 1549 | def : Pat<(add GR32:$src, -1), (DEC64_32r GR32:$src)>, Requires<[In64BitMode]>; |
| 1550 | def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>; |
| 1551 | |
| 1552 | // or reg/reg. |
| 1553 | def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr GR8 :$src1, GR8 :$src2)>; |
| 1554 | def : Pat<(or GR16:$src1, GR16:$src2), (OR16rr GR16:$src1, GR16:$src2)>; |
| 1555 | def : Pat<(or GR32:$src1, GR32:$src2), (OR32rr GR32:$src1, GR32:$src2)>; |
| 1556 | def : Pat<(or GR64:$src1, GR64:$src2), (OR64rr GR64:$src1, GR64:$src2)>; |
| 1557 | |
| 1558 | // or reg/mem |
| 1559 | def : Pat<(or GR8:$src1, (loadi8 addr:$src2)), |
| 1560 | (OR8rm GR8:$src1, addr:$src2)>; |
| 1561 | def : Pat<(or GR16:$src1, (loadi16 addr:$src2)), |
| 1562 | (OR16rm GR16:$src1, addr:$src2)>; |
| 1563 | def : Pat<(or GR32:$src1, (loadi32 addr:$src2)), |
| 1564 | (OR32rm GR32:$src1, addr:$src2)>; |
| 1565 | def : Pat<(or GR64:$src1, (loadi64 addr:$src2)), |
| 1566 | (OR64rm GR64:$src1, addr:$src2)>; |
| 1567 | |
| 1568 | // or reg/imm |
| 1569 | def : Pat<(or GR8 :$src1, imm:$src2), (OR8ri GR8 :$src1, imm:$src2)>;
| 1570 | def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>; |
| 1571 | def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>; |
| 1572 | def : Pat<(or GR16:$src1, i16immSExt8:$src2), |
| 1573 | (OR16ri8 GR16:$src1, i16immSExt8:$src2)>; |
| 1574 | def : Pat<(or GR32:$src1, i32immSExt8:$src2), |
| 1575 | (OR32ri8 GR32:$src1, i32immSExt8:$src2)>; |
| 1576 | def : Pat<(or GR64:$src1, i64immSExt8:$src2), |
| 1577 | (OR64ri8 GR64:$src1, i64immSExt8:$src2)>; |
| 1578 | def : Pat<(or GR64:$src1, i64immSExt32:$src2), |
| 1579 | (OR64ri32 GR64:$src1, i64immSExt32:$src2)>; |
| 1580 | |
| 1581 | // xor reg/reg |
| 1582 | def : Pat<(xor GR8 :$src1, GR8 :$src2), (XOR8rr GR8 :$src1, GR8 :$src2)>; |
| 1583 | def : Pat<(xor GR16:$src1, GR16:$src2), (XOR16rr GR16:$src1, GR16:$src2)>; |
| 1584 | def : Pat<(xor GR32:$src1, GR32:$src2), (XOR32rr GR32:$src1, GR32:$src2)>; |
| 1585 | def : Pat<(xor GR64:$src1, GR64:$src2), (XOR64rr GR64:$src1, GR64:$src2)>; |
| 1586 | |
| 1587 | // xor reg/mem |
| 1588 | def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)), |
| 1589 | (XOR8rm GR8:$src1, addr:$src2)>; |
| 1590 | def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)), |
| 1591 | (XOR16rm GR16:$src1, addr:$src2)>; |
| 1592 | def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)), |
| 1593 | (XOR32rm GR32:$src1, addr:$src2)>; |
| 1594 | def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)), |
| 1595 | (XOR64rm GR64:$src1, addr:$src2)>; |
| 1596 | |
| 1597 | // xor reg/imm |
| 1598 | def : Pat<(xor GR8:$src1, imm:$src2), |
| 1599 | (XOR8ri GR8:$src1, imm:$src2)>; |
| 1600 | def : Pat<(xor GR16:$src1, imm:$src2), |
| 1601 | (XOR16ri GR16:$src1, imm:$src2)>; |
| 1602 | def : Pat<(xor GR32:$src1, imm:$src2), |
| 1603 | (XOR32ri GR32:$src1, imm:$src2)>; |
| 1604 | def : Pat<(xor GR16:$src1, i16immSExt8:$src2), |
| 1605 | (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>; |
| 1606 | def : Pat<(xor GR32:$src1, i32immSExt8:$src2), |
| 1607 | (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>; |
| 1608 | def : Pat<(xor GR64:$src1, i64immSExt8:$src2), |
| 1609 | (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>; |
| 1610 | def : Pat<(xor GR64:$src1, i64immSExt32:$src2), |
| 1611 | (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>; |
| 1612 | |
| 1613 | // and reg/reg |
| 1614 | def : Pat<(and GR8 :$src1, GR8 :$src2), (AND8rr GR8 :$src1, GR8 :$src2)>; |
| 1615 | def : Pat<(and GR16:$src1, GR16:$src2), (AND16rr GR16:$src1, GR16:$src2)>; |
| 1616 | def : Pat<(and GR32:$src1, GR32:$src2), (AND32rr GR32:$src1, GR32:$src2)>; |
| 1617 | def : Pat<(and GR64:$src1, GR64:$src2), (AND64rr GR64:$src1, GR64:$src2)>; |
| 1618 | |
| 1619 | // and reg/mem |
| 1620 | def : Pat<(and GR8:$src1, (loadi8 addr:$src2)), |
| 1621 | (AND8rm GR8:$src1, addr:$src2)>; |
| 1622 | def : Pat<(and GR16:$src1, (loadi16 addr:$src2)), |
| 1623 | (AND16rm GR16:$src1, addr:$src2)>; |
| 1624 | def : Pat<(and GR32:$src1, (loadi32 addr:$src2)), |
| 1625 | (AND32rm GR32:$src1, addr:$src2)>; |
| 1626 | def : Pat<(and GR64:$src1, (loadi64 addr:$src2)), |
| 1627 | (AND64rm GR64:$src1, addr:$src2)>; |
| 1628 | |
| 1629 | // and reg/imm |
| 1630 | def : Pat<(and GR8:$src1, imm:$src2), |
| 1631 | (AND8ri GR8:$src1, imm:$src2)>; |
| 1632 | def : Pat<(and GR16:$src1, imm:$src2), |
| 1633 | (AND16ri GR16:$src1, imm:$src2)>; |
| 1634 | def : Pat<(and GR32:$src1, imm:$src2), |
| 1635 | (AND32ri GR32:$src1, imm:$src2)>; |
| 1636 | def : Pat<(and GR16:$src1, i16immSExt8:$src2), |
| 1637 | (AND16ri8 GR16:$src1, i16immSExt8:$src2)>; |
| 1638 | def : Pat<(and GR32:$src1, i32immSExt8:$src2), |
| 1639 | (AND32ri8 GR32:$src1, i32immSExt8:$src2)>; |
| 1640 | def : Pat<(and GR64:$src1, i64immSExt8:$src2), |
| 1641 | (AND64ri8 GR64:$src1, i64immSExt8:$src2)>; |
| 1642 | def : Pat<(and GR64:$src1, i64immSExt32:$src2), |
| 1643 | (AND64ri32 GR64:$src1, i64immSExt32:$src2)>; |
| 1644 |