//===- X86InstrCompiler.td - Compiler Pseudos and Patterns -*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the various pseudo instructions used by the compiler,
// as well as Pat patterns used during instruction selection.
//
//===----------------------------------------------------------------------===//


//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN   : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR32:$addr)]>;

}

let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN64   : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
                      "ret\t#eh_return, addr: $addr",
                      [(X86ehret GR64:$addr)]>;

}


//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//

// ConstantPool, GlobalAddress, ExternalSymbol, and JumpTable
def : Pat<(i32 (X86Wrapper tconstpool  :$dst)), (MOV32ri tconstpool  :$dst)>;
def : Pat<(i32 (X86Wrapper tjumptable  :$dst)), (MOV32ri tjumptable  :$dst)>;
def : Pat<(i32 (X86Wrapper tglobaltlsaddr:$dst)),(MOV32ri tglobaltlsaddr:$dst)>;
def : Pat<(i32 (X86Wrapper tglobaladdr :$dst)), (MOV32ri tglobaladdr :$dst)>;
def : Pat<(i32 (X86Wrapper texternalsym:$dst)), (MOV32ri texternalsym:$dst)>;
def : Pat<(i32 (X86Wrapper tblockaddress:$dst)), (MOV32ri tblockaddress:$dst)>;
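// For illustration: in non-PIC code each of these typically selects to a
// plain immediate move such as "movl $symbol, %eax"; X86Wrapper just marks
// the operand as a symbolic (relocatable) immediate.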

def : Pat<(add GR32:$src1, (X86Wrapper tconstpool:$src2)),
          (ADD32ri GR32:$src1, tconstpool:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tjumptable:$src2)),
          (ADD32ri GR32:$src1, tjumptable:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tglobaladdr :$src2)),
          (ADD32ri GR32:$src1, tglobaladdr:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper texternalsym:$src2)),
          (ADD32ri GR32:$src1, texternalsym:$src2)>;
def : Pat<(add GR32:$src1, (X86Wrapper tblockaddress:$src2)),
          (ADD32ri GR32:$src1, tblockaddress:$src2)>;

def : Pat<(store (i32 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV32mi addr:$dst, tglobaladdr:$src)>;
def : Pat<(store (i32 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV32mi addr:$dst, texternalsym:$src)>;
def : Pat<(store (i32 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV32mi addr:$dst, tblockaddress:$src)>;


// ConstantPool, GlobalAddress, ExternalSymbol, and JumpTable references that
// are not in the small code model should use 'movabs'.  FIXME: This is really
// a hack; the 'movabs' predicate should handle this sort of thing.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri tconstpool  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri tjumptable  :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri tglobaladdr :$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri texternalsym:$dst)>, Requires<[FarData]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri tblockaddress:$dst)>, Requires<[FarData]>;

// In static codegen with small code model, we can get the address of a label
// into a register with 'movl'.  FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri64i32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri64i32 tconstpool  :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri64i32 tjumptable  :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri64i32 tglobaladdr :$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri64i32 texternalsym:$dst)>, Requires<[SmallCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri64i32 tblockaddress:$dst)>, Requires<[SmallCode]>;

// In kernel code model, we can get the address of a label
// into a register with 'movq'.  FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri32 tconstpool  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri32 tjumptable  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;
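// Illustration: in the kernel code model, code and globals live in the top
// 2GB of the address space, so a symbol's address fits in a sign-extended
// 32-bit immediate and can be formed with e.g. "movq $symbol, %rax".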

// With the small code model and the static relocation model, it is safe to
// store global addresses directly as immediates.  FIXME: This is really a
// hack, the 'imm' predicate for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
          Requires<[NearData, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tblockaddress:$src)>,
          Requires<[NearData, IsStatic]>;



// Calls

// TLS support: patterns for materializing thread-local storage addresses.
// This corresponds to movabs $foo@tpoff, %rax
def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
          (MOV64ri tglobaltlsaddr :$dst)>;
// This corresponds to add $foo@tpoff, %rax
def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
          (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;
// This corresponds to mov foo@tpoff(%rbx), %eax
def : Pat<(load (i64 (X86Wrapper tglobaltlsaddr :$dst))),
          (MOV64rm tglobaltlsaddr :$dst)>;


// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>, Requires<[NotWin64]>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>, Requires<[NotWin64]>;

def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (WINCALL64pcrel32 tglobaladdr:$dst)>, Requires<[IsWin64]>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (WINCALL64pcrel32 texternalsym:$dst)>, Requires<[IsWin64]>;
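// Illustration: these select to a direct "callq foo" with a rel32
// displacement; the WINCALL64 variants are presumably distinguished only by
// their implicit register defs (the Win64 calling-convention clobbers), not
// by their encoding.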

// Tail call patterns.
def : Pat<(X86tcret GR32_TC:$dst, imm:$off),
          (TCRETURNri GR32_TC:$dst, imm:$off)>,
          Requires<[In32BitMode]>;

// FIXME: This is disabled for 32-bit PIC mode because the global base
// register which is part of the address mode may be assigned a
// callee-saved register.
def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi addr:$dst, imm:$off)>,
          Requires<[In32BitMode, IsNotPIC]>;

def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi texternalsym:$dst, imm:$off)>,
          Requires<[In32BitMode]>;

def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
          (TCRETURNdi texternalsym:$dst, imm:$off)>,
          Requires<[In32BitMode]>;

def : Pat<(X86tcret GR64_TC:$dst, imm:$off),
          (TCRETURNri64 GR64_TC:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi64 addr:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
          (TCRETURNdi64 texternalsym:$dst, imm:$off)>,
          Requires<[In64BitMode]>;
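// Illustration: the TCRETURN* pseudos are later expanded to a plain jump
// (e.g. "jmp foo" or "jmpq *%rax") in place of call+ret, so the callee
// returns directly to our caller and reuses the current stack frame.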

// Normal calls, with various flavors of addresses.
def : Pat<(X86call (i32 tglobaladdr:$dst)),
          (CALLpcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i32 texternalsym:$dst)),
          (CALLpcrel32 texternalsym:$dst)>;
def : Pat<(X86call (i32 imm:$dst)),
          (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;

// X86-specific add/sub patterns that also produce a carry flag (addc/subc).
def : Pat<(addc GR32:$src1, GR32:$src2),
          (ADD32rr GR32:$src1, GR32:$src2)>;
def : Pat<(addc GR32:$src1, (load addr:$src2)),
          (ADD32rm GR32:$src1, addr:$src2)>;
def : Pat<(addc GR32:$src1, imm:$src2),
          (ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(addc GR32:$src1, i32immSExt8:$src2),
          (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;

def : Pat<(addc GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(addc GR64:$src1, (load addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(addc GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;

def : Pat<(subc GR32:$src1, GR32:$src2),
          (SUB32rr GR32:$src1, GR32:$src2)>;
def : Pat<(subc GR32:$src1, (load addr:$src2)),
          (SUB32rm GR32:$src1, addr:$src2)>;
def : Pat<(subc GR32:$src1, imm:$src2),
          (SUB32ri GR32:$src1, imm:$src2)>;
def : Pat<(subc GR32:$src1, i32immSExt8:$src2),
          (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;

def : Pat<(subc GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(subc GR64:$src1, (load addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(subc GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(subc GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Comparisons.

// TEST R,R is smaller than CMP R,0
def : Pat<(X86cmp GR8:$src1, 0),
          (TEST8rr GR8:$src1, GR8:$src1)>;
def : Pat<(X86cmp GR16:$src1, 0),
          (TEST16rr GR16:$src1, GR16:$src1)>;
def : Pat<(X86cmp GR32:$src1, 0),
          (TEST32rr GR32:$src1, GR32:$src1)>;
def : Pat<(X86cmp GR64:$src1, 0),
          (TEST64rr GR64:$src1, GR64:$src1)>;
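// For example, "testl %eax, %eax" is a 2-byte instruction while
// "cmpl $0, %eax" needs at least 3 bytes (sign-extended imm8 form), and the
// flag results relevant to a compare with zero (ZF/SF/PF, CF=OF=0) are the
// same for both.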

// Conditional moves with folded loads with operands swapped and conditions
// inverted.
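// For example, a select of the form "below ? %reg : [mem]" cannot keep the
// load on the "true" side (CMOVcc can only load its source operand), so it is
// emitted as roughly "cmovae (mem), %reg": condition inverted, operands
// swapped.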
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_B, EFLAGS),
          (CMOVAE16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_B, EFLAGS),
          (CMOVAE32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_AE, EFLAGS),
          (CMOVB16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_AE, EFLAGS),
          (CMOVB32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_E, EFLAGS),
          (CMOVNE16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_E, EFLAGS),
          (CMOVNE32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_NE, EFLAGS),
          (CMOVE16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_NE, EFLAGS),
          (CMOVE32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_BE, EFLAGS),
          (CMOVA16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_BE, EFLAGS),
          (CMOVA32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_A, EFLAGS),
          (CMOVBE16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_A, EFLAGS),
          (CMOVBE32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_L, EFLAGS),
          (CMOVGE16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_L, EFLAGS),
          (CMOVGE32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_GE, EFLAGS),
          (CMOVL16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_GE, EFLAGS),
          (CMOVL32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_LE, EFLAGS),
          (CMOVG16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_LE, EFLAGS),
          (CMOVG32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_G, EFLAGS),
          (CMOVLE16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_G, EFLAGS),
          (CMOVLE32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_P, EFLAGS),
          (CMOVNP16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_P, EFLAGS),
          (CMOVNP32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_NP, EFLAGS),
          (CMOVP16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_NP, EFLAGS),
          (CMOVP32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_S, EFLAGS),
          (CMOVNS16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_S, EFLAGS),
          (CMOVNS32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_NS, EFLAGS),
          (CMOVS16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_NS, EFLAGS),
          (CMOVS32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_O, EFLAGS),
          (CMOVNO16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_O, EFLAGS),
          (CMOVNO32rm GR32:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, X86_COND_NO, EFLAGS),
          (CMOVO16rm GR16:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, X86_COND_NO, EFLAGS),
          (CMOVO32rm GR32:$src2, addr:$src1)>;

def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_B, EFLAGS),
          (CMOVAE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_AE, EFLAGS),
          (CMOVB64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_E, EFLAGS),
          (CMOVNE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NE, EFLAGS),
          (CMOVE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_BE, EFLAGS),
          (CMOVA64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_A, EFLAGS),
          (CMOVBE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_L, EFLAGS),
          (CMOVGE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_GE, EFLAGS),
          (CMOVL64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_LE, EFLAGS),
          (CMOVG64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_G, EFLAGS),
          (CMOVLE64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_P, EFLAGS),
          (CMOVNP64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NP, EFLAGS),
          (CMOVP64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_S, EFLAGS),
          (CMOVNS64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NS, EFLAGS),
          (CMOVS64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_O, EFLAGS),
          (CMOVNO64rm GR64:$src2, addr:$src1)>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NO, EFLAGS),
          (CMOVO64rm GR64:$src2, addr:$src1)>;


// zextload bool -> zextload byte
def : Pat<(zextloadi8i1  addr:$src), (MOV8rm     addr:$src)>;
def : Pat<(zextloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;
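// Note: an i1 stored to memory is already 0 or 1, so a plain byte load (or a
// zero-extending movzx-style load for wider results) is sufficient here.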

// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.

def : Pat<(extloadi8i1 addr:$src),   (MOV8rm      addr:$src)>;
def : Pat<(extloadi16i1 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i1 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi16i8 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i8 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;

def : Pat<(extloadi64i1 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i8 addr:$src),  (MOVZX64rm8  addr:$src)>;
def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
// For other extloads, use subregs, since the high contents of the register are
// defined after an extload.
def : Pat<(extloadi64i32 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src),
                         sub_32bit)>;

// anyext. Define these to do an explicit zero-extend to
// avoid partial-register updates.
def : Pat<(i16 (anyext GR8 :$src)), (MOVZX16rr8  GR8 :$src)>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8  GR8 :$src)>;

// Except for i16 -> i32, since isel expects i16 ops to be promoted to i32.
def : Pat<(i32 (anyext GR16:$src)),
          (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;

def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8  GR8  :$src)>;
def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16 :$src)>;
def : Pat<(i64 (anyext GR32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
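// Note: SUBREG_TO_REG with a 0 operand records that the upper 32 bits are
// already zero; on x86-64 every write to a 32-bit register implicitly zeroes
// bits 63:32, so no extra instruction is needed for this extension.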

//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//

// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
def : Pat<(add GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
          (SUB16mi8 addr:$dst, -128)>;

def : Pat<(add GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
          (SUB32mi8 addr:$dst, -128)>;

def : Pat<(add GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi8 addr:$dst, -128)>;
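// For example, "addl $128, %eax" needs a 32-bit immediate encoding, while the
// equivalent "subl $-128, %eax" fits the sign-extended 8-bit immediate form
// and is roughly two to three bytes shorter.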

// The same trick applies for 32-bit immediate fields in 64-bit
// instructions.
def : Pat<(add GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;

// Use a 32-bit and with implicit zero-extension instead of a 64-bit and if it
// has an immediate with at least 32 bits of leading zeros, to avoid needing to
// materialize that immediate in a register first.
def : Pat<(and GR64:$src, i64immZExt32:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;
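// Illustration: since a 32-bit "andl" already zeroes bits 63:32 of the
// destination, masking with an immediate whose high 32 bits are zero needs no
// 64-bit instruction (and no movabs to materialize the mask); GetLo32XForm
// extracts the low 32 bits of the immediate.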


// r & (2^16-1) ==> movz
def : Pat<(and GR32:$src1, 0xffff),
          (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src1,
                                                             GR32_ABCD)),
                                      sub_8bit))>,
      Requires<[In32BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (MOVZX16rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src1,
                                                             GR16_ABCD)),
                                      sub_8bit))>,
      Requires<[In32BitMode]>;

// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
          (MOVZX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
// r & (2^16-1) ==> movz
def : Pat<(and GR64:$src, 0xffff),
          (MOVZX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit)))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
          (MOVZX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit)))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>,
      Requires<[In64BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (MOVZX16rr8 (i8 (EXTRACT_SUBREG GR16:$src1, sub_8bit)))>,
      Requires<[In64BitMode]>;
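// For example, "andl $255, %edi" can instead be emitted as
// "movzbl %dil, %edi", which is a shorter encoding than an and with a
// 16/32-bit immediate and does not clobber EFLAGS.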


// sext_inreg patterns
def : Pat<(sext_inreg GR32:$src, i16),
          (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit))>,
      Requires<[In32BitMode]>;
def : Pat<(sext_inreg GR16:$src, i8),
          (MOVSX16rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit))>,
      Requires<[In32BitMode]>;

def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
def : Pat<(sext_inreg GR64:$src, i16),
          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>,
      Requires<[In64BitMode]>;
def : Pat<(sext_inreg GR16:$src, i8),
          (MOVSX16rr8 (i8 (EXTRACT_SUBREG GR16:$src, sub_8bit)))>,
      Requires<[In64BitMode]>;


// trunc patterns
def : Pat<(i16 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit)>,
      Requires<[In32BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit)>,
      Requires<[In32BitMode]>;
def : Pat<(i32 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
def : Pat<(i16 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
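// Note: truncation is modeled purely as a subregister extract, so these
// patterns emit no instruction at all; the value is simply read through the
// low subregister of the wider register.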

// h-register tricks
def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit_hi)>,
      Requires<[In32BitMode]>;
def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit_hi)>,
      Requires<[In32BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32rr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_16bit)>,
      Requires<[In32BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[In32BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                             GR16_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[In32BitMode]>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[In32BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                             GR32_ABCD)),
                                      sub_8bit_hi))>,
      Requires<[In32BitMode]>;

// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if the
// value is immediately zero-extended or stored, which are somewhat common
// cases. This uses a bunch of code to prevent a register requiring a REX prefix
// from being allocated in the same instruction as the h register, as there's
// currently no way to describe this requirement to the register allocator.
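// Illustration: AH/BH/CH/DH cannot be addressed by any instruction that
// carries a REX prefix, so e.g. "movzbl %ah, %r8d" is not encodable. The
// *_NOREX instruction forms restrict their other operands to registers that
// never need REX, keeping such combinations out of the allocator's reach.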

// h-register extract and zero-extend.
def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32_NOREXrr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                                   GR32_ABCD)),
                                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_16bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                              sub_8bit_hi)),
            sub_32bit)>;

// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
                            sub_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                            sub_8bit_hi))>,
      Requires<[In64BitMode]>;


// (shl x, 1) ==> (add x, x)
def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr  GR8 :$src1, GR8 :$src1)>;
def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
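// Note: "addl %eax, %eax" is the same length as "shll $1, %eax", but on most
// micro-architectures adds can issue on more execution ports than shifts, so
// the add form is generally preferable.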

// (shl x (and y, 31)) ==> (shl x, y)
def : Pat<(shl GR8:$src1, (and CL, 31)),
          (SHL8rCL GR8:$src1)>;
def : Pat<(shl GR16:$src1, (and CL, 31)),
          (SHL16rCL GR16:$src1)>;
def : Pat<(shl GR32:$src1, (and CL, 31)),
          (SHL32rCL GR32:$src1)>;
def : Pat<(store (shl (loadi8 addr:$dst), (and CL, 31)), addr:$dst),
          (SHL8mCL addr:$dst)>;
def : Pat<(store (shl (loadi16 addr:$dst), (and CL, 31)), addr:$dst),
          (SHL16mCL addr:$dst)>;
def : Pat<(store (shl (loadi32 addr:$dst), (and CL, 31)), addr:$dst),
          (SHL32mCL addr:$dst)>;

def : Pat<(srl GR8:$src1, (and CL, 31)),
          (SHR8rCL GR8:$src1)>;
def : Pat<(srl GR16:$src1, (and CL, 31)),
          (SHR16rCL GR16:$src1)>;
def : Pat<(srl GR32:$src1, (and CL, 31)),
          (SHR32rCL GR32:$src1)>;
def : Pat<(store (srl (loadi8 addr:$dst), (and CL, 31)), addr:$dst),
          (SHR8mCL addr:$dst)>;
def : Pat<(store (srl (loadi16 addr:$dst), (and CL, 31)), addr:$dst),
          (SHR16mCL addr:$dst)>;
def : Pat<(store (srl (loadi32 addr:$dst), (and CL, 31)), addr:$dst),
          (SHR32mCL addr:$dst)>;

def : Pat<(sra GR8:$src1, (and CL, 31)),
          (SAR8rCL GR8:$src1)>;
def : Pat<(sra GR16:$src1, (and CL, 31)),
          (SAR16rCL GR16:$src1)>;
def : Pat<(sra GR32:$src1, (and CL, 31)),
          (SAR32rCL GR32:$src1)>;
def : Pat<(store (sra (loadi8 addr:$dst), (and CL, 31)), addr:$dst),
          (SAR8mCL addr:$dst)>;
def : Pat<(store (sra (loadi16 addr:$dst), (and CL, 31)), addr:$dst),
          (SAR16mCL addr:$dst)>;
def : Pat<(store (sra (loadi32 addr:$dst), (and CL, 31)), addr:$dst),
          (SAR32mCL addr:$dst)>;

// (shl x (and y, 63)) ==> (shl x, y)
def : Pat<(shl GR64:$src1, (and CL, 63)),
          (SHL64rCL GR64:$src1)>;
def : Pat<(store (shl (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
          (SHL64mCL addr:$dst)>;

def : Pat<(srl GR64:$src1, (and CL, 63)),
          (SHR64rCL GR64:$src1)>;
def : Pat<(store (srl (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
          (SHR64mCL addr:$dst)>;

def : Pat<(sra GR64:$src1, (and CL, 63)),
          (SAR64rCL GR64:$src1)>;
def : Pat<(store (sra (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
          (SAR64mCL addr:$dst)>;
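// Note: the hardware already masks the CL shift count to 5 bits (6 bits for
// 64-bit operands), so an explicit "and" of the count is redundant and these
// patterns simply drop it.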


// (anyext (setcc_carry)) -> (setcc_carry)
def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i32 (anyext (i16 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
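// Note: the SETB_C*r pseudos expand to an "sbb reg, reg", which materializes
// 0 or -1 from the carry flag; since every bit of the result is the same, an
// any-extend can simply use the wider form of the same operation.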

// (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.
let AddedComplexity = 5 in { // Try this before selecting as an OR.
def : Pat<(or_is_add GR16:$src1, imm:$src2),
          (ADD16ri GR16:$src1, imm:$src2)>;
def : Pat<(or_is_add GR32:$src1, imm:$src2),
          (ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(or_is_add GR16:$src1, i16immSExt8:$src2),
          (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(or_is_add GR32:$src1, i32immSExt8:$src2),
          (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(or_is_add GR16:$src1, GR16:$src2),
          (ADD16rr GR16:$src1, GR16:$src2)>;
def : Pat<(or_is_add GR32:$src1, GR32:$src2),
          (ADD32rr GR32:$src1, GR32:$src2)>;
def : Pat<(or_is_add GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or_is_add GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(or_is_add GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
} // AddedComplexity
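// Note: when the operands share no set bits, OR and ADD compute the same
// value; ADD is preferred because later passes can turn it into an LEA (a
// three-operand add), e.g. folding the operation into an addressing mode.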

//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//

// add reg, reg
def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>;
def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>;

// add reg, mem
def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
          (ADD8rm GR8:$src1, addr:$src2)>;
def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
          (ADD16rm GR16:$src1, addr:$src2)>;
def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
          (ADD32rm GR32:$src1, addr:$src2)>;

// add reg, imm
def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri  GR8:$src1 , imm:$src2)>;
def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;
def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, i16immSExt8:$src2),
          (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(add GR32:$src1, i32immSExt8:$src2),
          (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;

// sub reg, reg
def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>;
def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>;

// sub reg, mem
def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
          (SUB8rm GR8:$src1, addr:$src2)>;
def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
          (SUB16rm GR16:$src1, addr:$src2)>;
def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
          (SUB32rm GR32:$src1, addr:$src2)>;

// sub reg, imm
def : Pat<(sub GR8:$src1, imm:$src2),
          (SUB8ri GR8:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, imm:$src2),
          (SUB16ri GR16:$src1, imm:$src2)>;
def : Pat<(sub GR32:$src1, imm:$src2),
          (SUB32ri GR32:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, i16immSExt8:$src2),
          (SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(sub GR32:$src1, i32immSExt8:$src2),
          (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;

// mul reg, reg
def : Pat<(mul GR16:$src1, GR16:$src2),
          (IMUL16rr GR16:$src1, GR16:$src2)>;
def : Pat<(mul GR32:$src1, GR32:$src2),
          (IMUL32rr GR32:$src1, GR32:$src2)>;

// mul reg, mem
def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
          (IMUL16rm GR16:$src1, addr:$src2)>;
def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
          (IMUL32rm GR32:$src1, addr:$src2)>;

// mul reg, imm
def : Pat<(mul GR16:$src1, imm:$src2),
          (IMUL16rri GR16:$src1, imm:$src2)>;
def : Pat<(mul GR32:$src1, imm:$src2),
          (IMUL32rri GR32:$src1, imm:$src2)>;
def : Pat<(mul GR16:$src1, i16immSExt8:$src2),
          (IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(mul GR32:$src1, i32immSExt8:$src2),
          (IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>;

// reg = mul mem, imm
def : Pat<(mul (loadi16 addr:$src1), imm:$src2),
          (IMUL16rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
          (IMUL32rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi16 addr:$src1), i16immSExt8:$src2),
          (IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), i32immSExt8:$src2),
          (IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>;

// Optimize multiply by 2 with EFLAGS result.
let AddedComplexity = 2 in {
def : Pat<(X86smul_flag GR16:$src1, 2), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(X86smul_flag GR32:$src1, 2), (ADD32rr GR32:$src1, GR32:$src1)>;
}

// Patterns for nodes that do not produce flags, for instructions that do.

// addition
def : Pat<(add GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;

// subtraction
def : Pat<(sub GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Multiply
def : Pat<(mul GR64:$src1, GR64:$src2),
          (IMUL64rr GR64:$src1, GR64:$src2)>;
def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
          (IMUL64rm GR64:$src1, addr:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt8:$src2),
          (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
          (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;

// Increment reg.
def : Pat<(add GR8 :$src, 1), (INC8r     GR8 :$src)>;
def : Pat<(add GR16:$src, 1), (INC16r    GR16:$src)>, Requires<[In32BitMode]>;
def : Pat<(add GR16:$src, 1), (INC64_16r GR16:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR32:$src, 1), (INC32r    GR32:$src)>, Requires<[In32BitMode]>;
def : Pat<(add GR32:$src, 1), (INC64_32r GR32:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR64:$src, 1), (INC64r    GR64:$src)>;

// Decrement reg.
def : Pat<(add GR8 :$src, -1), (DEC8r     GR8 :$src)>;
def : Pat<(add GR16:$src, -1), (DEC16r    GR16:$src)>, Requires<[In32BitMode]>;
def : Pat<(add GR16:$src, -1), (DEC64_16r GR16:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR32:$src, -1), (DEC32r    GR32:$src)>, Requires<[In32BitMode]>;
def : Pat<(add GR32:$src, -1), (DEC64_32r GR32:$src)>, Requires<[In64BitMode]>;
def : Pat<(add GR64:$src, -1), (DEC64r    GR64:$src)>;
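// Note: in 64-bit mode the one-byte inc/dec encodings (0x40-0x4F) are reused
// as REX prefixes, so the INC64_*/DEC64_* forms above use the longer
// ModRM-based encoding instead.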

// or reg/reg.
def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(or GR16:$src1, GR16:$src2), (OR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(or GR32:$src1, GR32:$src2), (OR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(or GR64:$src1, GR64:$src2), (OR64rr GR64:$src1, GR64:$src2)>;

// or reg/mem
def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
          (OR8rm GR8:$src1, addr:$src2)>;
def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
          (OR16rm GR16:$src1, addr:$src2)>;
def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
          (OR32rm GR32:$src1, addr:$src2)>;
def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
          (OR64rm GR64:$src1, addr:$src2)>;

// or reg/imm
def : Pat<(or GR8:$src1 , imm:$src2), (OR8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>;
def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, i16immSExt8:$src2),
          (OR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(or GR32:$src1, i32immSExt8:$src2),
          (OR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt8:$src2),
          (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt32:$src2),
          (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// xor reg/reg
def : Pat<(xor GR8 :$src1, GR8 :$src2), (XOR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(xor GR16:$src1, GR16:$src2), (XOR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(xor GR32:$src1, GR32:$src2), (XOR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(xor GR64:$src1, GR64:$src2), (XOR64rr GR64:$src1, GR64:$src2)>;

// xor reg/mem
def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
          (XOR8rm GR8:$src1, addr:$src2)>;
def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),
          (XOR16rm GR16:$src1, addr:$src2)>;
def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),
          (XOR32rm GR32:$src1, addr:$src2)>;
def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
          (XOR64rm GR64:$src1, addr:$src2)>;

// xor reg/imm
def : Pat<(xor GR8:$src1, imm:$src2),
          (XOR8ri GR8:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, imm:$src2),
          (XOR16ri GR16:$src1, imm:$src2)>;
def : Pat<(xor GR32:$src1, imm:$src2),
          (XOR32ri GR32:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, i16immSExt8:$src2),
          (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(xor GR32:$src1, i32immSExt8:$src2),
          (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// and reg/reg
def : Pat<(and GR8 :$src1, GR8 :$src2), (AND8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(and GR16:$src1, GR16:$src2), (AND16rr GR16:$src1, GR16:$src2)>;
def : Pat<(and GR32:$src1, GR32:$src2), (AND32rr GR32:$src1, GR32:$src2)>;
def : Pat<(and GR64:$src1, GR64:$src2), (AND64rr GR64:$src1, GR64:$src2)>;

// and reg/mem
def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),
          (AND8rm GR8:$src1, addr:$src2)>;
def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),
          (AND16rm GR16:$src1, addr:$src2)>;
def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),
          (AND32rm GR32:$src1, addr:$src2)>;
def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
          (AND64rm GR64:$src1, addr:$src2)>;

// and reg/imm
def : Pat<(and GR8:$src1, imm:$src2),
          (AND8ri GR8:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, imm:$src2),
          (AND16ri GR16:$src1, imm:$src2)>;
def : Pat<(and GR32:$src1, imm:$src2),
          (AND32ri GR32:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, i16immSExt8:$src2),
          (AND16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(and GR32:$src1, i32immSExt8:$src2),
          (AND32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt8:$src2),
          (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt32:$src2),
          (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;
