//===-- X86InstrExtension.td - Sign and Zero Extensions ----*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the sign and zero extension operations.
//
//===----------------------------------------------------------------------===//

let neverHasSideEffects = 1 in {
  let Defs = [AX], Uses = [AL] in
  def CBW : I<0x98, RawFrm, (outs), (ins),
              "{cbtw|cbw}", []>, OpSize;   // AX = signext(AL)
  let Defs = [EAX], Uses = [AX] in
  def CWDE : I<0x98, RawFrm, (outs), (ins),
               "{cwtl|cwde}", []>;   // EAX = signext(AX)

  let Defs = [AX,DX], Uses = [AX] in
  def CWD : I<0x99, RawFrm, (outs), (ins),
              "{cwtd|cwd}", []>, OpSize; // DX:AX = signext(AX)
  let Defs = [EAX,EDX], Uses = [EAX] in
  def CDQ : I<0x99, RawFrm, (outs), (ins),
              "{cltd|cdq}", []>; // EDX:EAX = signext(EAX)


  let Defs = [RAX], Uses = [EAX] in
  def CDQE : RI<0x98, RawFrm, (outs), (ins),
                "{cltq|cdqe}", []>;   // RAX = signext(EAX)

  let Defs = [RAX,RDX], Uses = [RAX] in
  def CQO  : RI<0x99, RawFrm, (outs), (ins),
                "{cqto|cqo}", []>; // RDX:RAX = signext(RAX)
}
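// Illustrative note (not part of the definitions): the register-to-register
// sign extensions above are typically used to widen the dividend before a
// signed divide, e.g. in AT&T syntax:
//   cltd              # sign-extend %eax into %edx:%eax (cltd == cdq)
//   idivl %ecx        # %eax = quotient, %edx = remainder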


// Sign/Zero extenders
def MOVSX16rr8 : I<0xBE, MRMSrcReg, (outs GR16:$dst), (ins GR8:$src),
                   "movs{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
def MOVSX16rm8 : I<0xBE, MRMSrcMem, (outs GR16:$dst), (ins i8mem:$src),
                   "movs{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
def MOVSX32rr8 : I<0xBE, MRMSrcReg, (outs GR32:$dst), (ins GR8:$src),
                   "movs{bl|x}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, (sext GR8:$src))]>, TB;
def MOVSX32rm8 : I<0xBE, MRMSrcMem, (outs GR32:$dst), (ins i8mem :$src),
                   "movs{bl|x}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, (sextloadi32i8 addr:$src))]>, TB;
def MOVSX32rr16: I<0xBF, MRMSrcReg, (outs GR32:$dst), (ins GR16:$src),
                   "movs{wl|x}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, (sext GR16:$src))]>, TB;
def MOVSX32rm16: I<0xBF, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
                   "movs{wl|x}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, (sextloadi32i16 addr:$src))]>, TB;

def MOVZX16rr8 : I<0xB6, MRMSrcReg, (outs GR16:$dst), (ins GR8:$src),
                   "movz{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
def MOVZX16rm8 : I<0xB6, MRMSrcMem, (outs GR16:$dst), (ins i8mem:$src),
                   "movz{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
def MOVZX32rr8 : I<0xB6, MRMSrcReg, (outs GR32:$dst), (ins GR8 :$src),
                   "movz{bl|x}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, (zext GR8:$src))]>, TB;
def MOVZX32rm8 : I<0xB6, MRMSrcMem, (outs GR32:$dst), (ins i8mem :$src),
                   "movz{bl|x}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, (zextloadi32i8 addr:$src))]>, TB;
def MOVZX32rr16: I<0xB7, MRMSrcReg, (outs GR32:$dst), (ins GR16:$src),
                   "movz{wl|x}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, (zext GR16:$src))]>, TB;
def MOVZX32rm16: I<0xB7, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),
                   "movz{wl|x}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, (zextloadi32i16 addr:$src))]>, TB;

// These are the same as the regular MOVZX32rr8 and MOVZX32rm8
// except that they use GR32_NOREX for the output operand register class
// instead of GR32. This allows them to operate on h registers on x86-64.
def MOVZX32_NOREXrr8 : I<0xB6, MRMSrcReg,
                         (outs GR32_NOREX:$dst), (ins GR8_NOREX:$src),
                         "movz{bl|x}\t{$src, $dst|$dst, $src}",
                         []>, TB;
let mayLoad = 1 in
def MOVZX32_NOREXrm8 : I<0xB6, MRMSrcMem,
                         (outs GR32_NOREX:$dst), (ins i8mem_NOREX:$src),
                         "movz{bl|x}\t{$src, $dst|$dst, $src}",
                         []>, TB;
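// Illustrative note (assembly examples only, not definitions): an h register
// (AH, BH, CH, DH) cannot be encoded in an instruction that carries a REX
// prefix, so for example
//   movzbl %ah, %ecx    # encodable: no REX prefix required
//   movzbl %ah, %r8d    # not encodable: %r8d needs REX, which excludes %ah
// Restricting the destination to GR32_NOREX keeps the register allocator
// from producing the second form.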

// MOVSX64rr8 always has a REX prefix, so its 8-bit register operand can
// never access an h register; that makes it unusual among instructions with
// 8-bit register operands. If support for h registers were generalized, it
// would need a special register class.
def MOVSX64rr8 : RI<0xBE, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
                    "movs{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR8:$src))]>, TB;
def MOVSX64rm8 : RI<0xBE, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
                    "movs{bq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i8 addr:$src))]>, TB;
def MOVSX64rr16: RI<0xBF, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                    "movs{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR16:$src))]>, TB;
def MOVSX64rm16: RI<0xBF, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                    "movs{wq|x}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i16 addr:$src))]>, TB;
def MOVSX64rr32: RI<0x63, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src),
                    "movs{lq|xd}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sext GR32:$src))]>;
def MOVSX64rm32: RI<0x63, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
                    "movs{lq|xd}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (sextloadi64i32 addr:$src))]>;

// movzbq and movzwq encodings for the disassembler
def MOVZX64rr8_Q : RI<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8:$src),
                      "movz{bq|x}\t{$src, $dst|$dst, $src}", []>, TB;
def MOVZX64rm8_Q : RI<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem:$src),
                      "movz{bq|x}\t{$src, $dst|$dst, $src}", []>, TB;
def MOVZX64rr16_Q : RI<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                       "movz{wq|x}\t{$src, $dst|$dst, $src}", []>, TB;
def MOVZX64rm16_Q : RI<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                       "movz{wq|x}\t{$src, $dst|$dst, $src}", []>, TB;

// FIXME: These should be Pat patterns.
let isCodeGenOnly = 1 in {

// Use movzbl instead of movzbq when the destination is a register; it's
// equivalent due to implicit zero-extending, and it has a smaller encoding.
def MOVZX64rr8 : I<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
                   "", [(set GR64:$dst, (zext GR8:$src))]>, TB;
def MOVZX64rm8 : I<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem :$src),
                   "", [(set GR64:$dst, (zextloadi64i8 addr:$src))]>, TB;
// Use movzwl instead of movzwq when the destination is a register; it's
// equivalent due to implicit zero-extending, and it has a smaller encoding.
def MOVZX64rr16: I<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
                   "", [(set GR64:$dst, (zext GR16:$src))]>, TB;
def MOVZX64rm16: I<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                   "", [(set GR64:$dst, (zextloadi64i16 addr:$src))]>, TB;

// There's no movzlq instruction, but movl can be used for this purpose, since
// writing a 32-bit register implicitly zero-extends the value into the full
// 64-bit register. The preferred way to do 32-bit-to-64-bit zero extension on
// x86-64 is to use a SUBREG_TO_REG and rely on that implicit zero-extension;
// however, this isn't possible when the 32-bit value is defined by a truncate
// or is copied from something where the high bits aren't necessarily all
// zero. In such cases, we fall back to these explicit zext instructions.
def MOVZX64rr32 : I<0x89, MRMDestReg, (outs GR64:$dst), (ins GR32:$src),
                    "", [(set GR64:$dst, (zext GR32:$src))]>;
def MOVZX64rm32 : I<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
                    "", [(set GR64:$dst, (zextloadi64i32 addr:$src))]>;


}
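
// Sketch for the FIXME above (commented out, unverified here): the 8-bit
// zero-extension pseudo-instructions could plausibly be expressed as Pat
// patterns over the existing MOVZX32 definitions plus SUBREG_TO_REG, which
// models the implicit clearing of the upper 32 bits. MOVZX32rr8, MOVZX32rm8,
// and sub_32bit are existing names; whether replacing the pseudos with such
// patterns is otherwise safe has not been checked here.
// def : Pat<(i64 (zext GR8:$src)),
//           (SUBREG_TO_REG (i64 0), (MOVZX32rr8 GR8:$src), sub_32bit)>;
// def : Pat<(i64 (zextloadi64i8 addr:$src)),
//           (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;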