//===- ARMAddressingModes.h - ARM Addressing Modes --------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the ARM addressing mode implementation stuff.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_ARM_ARMADDRESSINGMODES_H
#define LLVM_TARGET_ARM_ARMADDRESSINGMODES_H

#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>

namespace llvm {

/// ARM_AM - ARM Addressing Mode Stuff
namespace ARM_AM {
  enum ShiftOpc {
    no_shift = 0,
    asr,
    lsl,
    lsr,
    ror,
    rrx
  };

  enum AddrOpc {
    add = '+', sub = '-'
  };

  static inline const char *getAddrOpcStr(AddrOpc Op) {
    return Op == sub ? "-" : "";
  }

  static inline const char *getShiftOpcStr(ShiftOpc Op) {
    switch (Op) {
    default: assert(0 && "Unknown shift opc!");
    case ARM_AM::asr: return "asr";
    case ARM_AM::lsl: return "lsl";
    case ARM_AM::lsr: return "lsr";
    case ARM_AM::ror: return "ror";
    case ARM_AM::rrx: return "rrx";
    }
  }

  static inline ShiftOpc getShiftOpcForNode(SDValue N) {
    switch (N.getOpcode()) {
    default:          return ARM_AM::no_shift;
    case ISD::SHL:    return ARM_AM::lsl;
    case ISD::SRL:    return ARM_AM::lsr;
    case ISD::SRA:    return ARM_AM::asr;
    case ISD::ROTR:   return ARM_AM::ror;
    //case ISD::ROTL:  // Only if imm -> turn into ROTR.
    // Can't handle RRX here, because it would require folding a flag into
    // the addressing mode. :( This causes us to miss certain things.
    //case ARMISD::RRX: return ARM_AM::rrx;
    }
  }

  enum AMSubMode {
    bad_am_submode = 0,
    ia,
    ib,
    da,
    db
  };

  static inline const char *getAMSubModeStr(AMSubMode Mode) {
    switch (Mode) {
    default: assert(0 && "Unknown addressing sub-mode!");
    case ARM_AM::ia: return "ia";
    case ARM_AM::ib: return "ib";
    case ARM_AM::da: return "da";
    case ARM_AM::db: return "db";
    }
  }

  /// rotr32 - Rotate a 32-bit unsigned value right by a specified # bits.
  ///
  static inline unsigned rotr32(unsigned Val, unsigned Amt) {
    assert(Amt < 32 && "Invalid rotate amount");
    return (Val >> Amt) | (Val << ((32-Amt)&31));
  }

  /// rotl32 - Rotate a 32-bit unsigned value left by a specified # bits.
  ///
  static inline unsigned rotl32(unsigned Val, unsigned Amt) {
    assert(Amt < 32 && "Invalid rotate amount");
    return (Val << Amt) | (Val >> ((32-Amt)&31));
  }

  //===--------------------------------------------------------------------===//
  // Addressing Mode #1: shift_operand with registers
  //===--------------------------------------------------------------------===//
  //
  // This 'addressing mode' is used for arithmetic instructions.  It can
  // represent things like:
  //   reg
  //   reg [asr|lsl|lsr|ror|rrx] reg
  //   reg [asr|lsl|lsr|ror|rrx] imm
  //
  // This is stored as three operands [rega, regb, opc].  The first is the base
  // reg, the second is the shift-amount register (or reg0 if the shift amount
  // is an immediate or absent).  The third operand encodes the shift opcode
  // and the immediate shift amount when a register isn't present.
  //
  static inline unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm) {
    return ShOp | (Imm << 3);
  }
  static inline unsigned getSORegOffset(unsigned Op) {
    return Op >> 3;
  }
  static inline ShiftOpc getSORegShOp(unsigned Op) {
    return (ShiftOpc)(Op & 7);
  }
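  // For example, an 'lsl #4' immediate shift is encoded as
  // getSORegOpc(ARM_AM::lsl, 4), and getSORegShOp() / getSORegOffset()
  // recover ARM_AM::lsl and 4 from that encoding.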

  /// getSOImmValImm - Given an encoded imm field for the reg/imm form, return
  /// the 8-bit imm value.
  static inline unsigned getSOImmValImm(unsigned Imm) {
    return Imm & 0xFF;
  }
  /// getSOImmValRot - Given an encoded imm field for the reg/imm form, return
  /// the rotate amount.
  static inline unsigned getSOImmValRot(unsigned Imm) {
    return (Imm >> 8) * 2;
  }
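  // For example, the encoded value 0x4FF has getSOImmValImm(0x4FF) == 0xFF and
  // getSOImmValRot(0x4FF) == 8, i.e. it denotes 0xFF rotated right by 8, which
  // is the constant 0xFF000000.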

  /// getSOImmValOneRotate - Try to handle Imm with an immediate shifter
  /// operand, computing the rotate amount to use.  If this immediate value
  /// cannot be handled with a single shifter-op, return 0.
  static inline unsigned getSOImmValOneRotate(unsigned Imm) {
    // A5.2.4 Constants with multiple encodings
    // The lowest unsigned value of rotation wins!
    for (unsigned R = 1; R <= 15; ++R)
      if ((Imm & rotr32(~255U, 2*R)) == 0)
        return 2*R;

    // Failed to find a suitable rotate amount.
    return 0;
  }

  /// getSOImmValRotate - Try to handle Imm with an immediate shifter operand,
  /// computing the rotate amount to use.  If this immediate value cannot be
  /// handled with a single shifter-op, determine a good rotate amount that
  /// will take a maximal chunk of bits out of the immediate.
  static inline unsigned getSOImmValRotate(unsigned Imm) {
    // 8-bit (or less) immediates are trivially shifter_operands with a rotate
    // of zero.
    if ((Imm & ~255U) == 0) return 0;

    // Use CTZ to compute the rotate amount.
    unsigned TZ = CountTrailingZeros_32(Imm);

    // Rotate amount must be even.  Something like 0x200 must be rotated 8 bits,
    // not 9.
    unsigned RotAmt = TZ & ~1;

    // If we can handle this spread, return it.
    if ((rotr32(Imm, RotAmt) & ~255U) == 0)
      return (32-RotAmt)&31;  // HW rotates right, not left.

    // For values like 0xF000000F, we should skip the first run of ones, then
    // retry the hunt.
    if (Imm & 1) {
      unsigned TrailingOnes = CountTrailingZeros_32(~Imm);
      if (TrailingOnes != 32) {  // Avoid overflow on 0xFFFFFFFF
        // Restart the search for a high-order bit after the initial run of
        // ones.
        unsigned TZ2 = CountTrailingZeros_32(Imm & ~((1 << TrailingOnes)-1));

        // Rotate amount must be even.
        unsigned RotAmt2 = TZ2 & ~1;

        // If this fits, use it.
        if (RotAmt2 != 32 && (rotr32(Imm, RotAmt2) & ~255U) == 0)
          return (32-RotAmt2)&31;  // HW rotates right, not left.
      }
    }

    // Otherwise, we have no way to cover this span of bits with a single
    // shifter_op immediate.  Return a chunk of bits that will be useful to
    // handle.
    return (32-RotAmt)&31;  // HW rotates right, not left.
  }

  /// getSOImmVal - Given a 32-bit immediate, if it is something that can fit
  /// into a shifter_operand immediate operand, return the 12-bit encoding for
  /// it.  If not, return -1.
  static inline int getSOImmVal(unsigned Arg) {
    // 8-bit (or less) immediates are trivially shifter_operands with a rotate
    // of zero.
    if ((Arg & ~255U) == 0) return Arg;

    unsigned RotAmt = getSOImmValOneRotate(Arg);

    // If this cannot be handled with a single shifter_op, bail out.
    if (rotr32(~255U, RotAmt) & Arg)
      return -1;

    // Encode this correctly.
    return rotl32(Arg, RotAmt) | ((RotAmt>>1) << 8);
  }
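  // For example, getSOImmVal(0xFF000000) == 0x4FF (imm8 = 0xFF, rotate field 4,
  // i.e. rotate right by 8), while getSOImmVal(0x00001001) == -1 because bits
  // 0 and 12 cannot both fit in a single rotated 8-bit window.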

  /// isSOImmTwoPartVal - Return true if the specified value can be obtained by
  /// or'ing together two SOImmVal's.
  static inline bool isSOImmTwoPartVal(unsigned V) {
    // If this can be handled with a single shifter_op, bail out.
    V = rotr32(~255U, getSOImmValRotate(V)) & V;
    if (V == 0)
      return false;

    // If this can be handled with two shifter_op's, accept.
    V = rotr32(~255U, getSOImmValRotate(V)) & V;
    return V == 0;
  }

  /// getSOImmTwoPartFirst - If V is a value that satisfies isSOImmTwoPartVal,
  /// return the first chunk of it.
  static inline unsigned getSOImmTwoPartFirst(unsigned V) {
    return rotr32(255U, getSOImmValRotate(V)) & V;
  }

  /// getSOImmTwoPartSecond - If V is a value that satisfies isSOImmTwoPartVal,
  /// return the second chunk of it.
  static inline unsigned getSOImmTwoPartSecond(unsigned V) {
    // Mask out the first hunk.
    V = rotr32(~255U, getSOImmValRotate(V)) & V;

    // Take what's left.
    assert(V == (rotr32(255U, getSOImmValRotate(V)) & V));
    return V;
  }
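  // For example, 0x0000FFFF is not a valid shifter_operand immediate, but it
  // is a two-part value: getSOImmTwoPartFirst(0xFFFF) == 0x00FF and
  // getSOImmTwoPartSecond(0xFFFF) == 0xFF00, each encodable on its own.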

  /// getThumbImmValShift - Try to handle Imm with an 8-bit immediate followed
  /// by a left shift.  Returns the shift amount to use.
  static inline unsigned getThumbImmValShift(unsigned Imm) {
    // 8-bit (or less) immediates are trivially immediate operands with a shift
    // of zero.
    if ((Imm & ~255U) == 0) return 0;

    // Use CTZ to compute the shift amount.
    return CountTrailingZeros_32(Imm);
  }

  /// isThumbImmShiftedVal - Return true if the specified value can be obtained
  /// by left shifting an 8-bit immediate.
  static inline bool isThumbImmShiftedVal(unsigned V) {
    // If this can be handled as a left-shifted 8-bit immediate, masking out
    // the shifted 8-bit window leaves nothing behind.
    V = (~255U << getThumbImmValShift(V)) & V;
    return V == 0;
  }

  /// getThumbImm16ValShift - Try to handle Imm with a 16-bit immediate followed
  /// by a left shift.  Returns the shift amount to use.
  static inline unsigned getThumbImm16ValShift(unsigned Imm) {
    // 16-bit (or less) immediates are trivially immediate operands with a shift
    // of zero.
    if ((Imm & ~65535U) == 0) return 0;

    // Use CTZ to compute the shift amount.
    return CountTrailingZeros_32(Imm);
  }

  /// isThumbImm16ShiftedVal - Return true if the specified value can be
  /// obtained by left shifting a 16-bit immediate.
  static inline bool isThumbImm16ShiftedVal(unsigned V) {
    // If this can be handled as a left-shifted 16-bit immediate, masking out
    // the shifted 16-bit window leaves nothing behind.
    V = (~65535U << getThumbImm16ValShift(V)) & V;
    return V == 0;
  }

  /// getThumbImmNonShiftedVal - If V is a value that satisfies
  /// isThumbImmShiftedVal, return the non-shifted value.
  static inline unsigned getThumbImmNonShiftedVal(unsigned V) {
    return V >> getThumbImmValShift(V);
  }
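  // For example, 0x00000FF0 is 0xFF shifted left by 4: getThumbImmValShift()
  // returns 4, isThumbImmShiftedVal() returns true, and
  // getThumbImmNonShiftedVal() returns 0xFF.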

  /// getT2SOImmValSplatVal - Return the 12-bit encoded representation
  /// if the specified value can be obtained by splatting the low 8 bits
  /// into every other byte or every byte of a 32-bit value. i.e.,
  ///     00000000 00000000 00000000 abcdefgh    control = 0
  ///     00000000 abcdefgh 00000000 abcdefgh    control = 1
  ///     abcdefgh 00000000 abcdefgh 00000000    control = 2
  ///     abcdefgh abcdefgh abcdefgh abcdefgh    control = 3
  /// Return -1 if none of the above apply.
  /// See ARM Reference Manual A6.3.2.
  static inline int getT2SOImmValSplatVal(unsigned V) {
    unsigned u, Vs, Imm;
    // control = 0
    if ((V & 0xffffff00) == 0)
      return V;

    // If the value is zeroes in the first byte, just shift those off
    Vs = ((V & 0xff) == 0) ? V >> 8 : V;
    // Any passing value only has 8 bits of payload, splatted across the word
    Imm = Vs & 0xff;
    // Likewise, any passing values have the payload splatted into the 3rd byte
    u = Imm | (Imm << 16);

    // control = 1 or 2
    if (Vs == u)
      return (((Vs == V) ? 1 : 2) << 8) | Imm;

    // control = 3
    if (Vs == (u | (u << 8)))
      return (3 << 8) | Imm;

    return -1;
  }
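  // For example, getT2SOImmValSplatVal(0x000000AB) == 0x0AB (control = 0),
  // getT2SOImmValSplatVal(0x00AB00AB) == 0x1AB (control = 1), and
  // getT2SOImmValSplatVal(0xABABABAB) == 0x3AB (control = 3).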

  /// getT2SOImmValRotateVal - Return the 12-bit encoded representation if the
  /// specified value is a rotated 8-bit value.  Return -1 if no rotation
  /// encoding is possible.
  /// See ARM Reference Manual A6.3.2.
  static inline int getT2SOImmValRotateVal(unsigned V) {
    unsigned RotAmt = CountLeadingZeros_32(V);
    if (RotAmt >= 24)
      return -1;

    // If 'V' can be handled with a single shifter_op return the value.
    if ((rotr32(0xff000000U, RotAmt) & V) == V)
      return (rotr32(V, 24 - RotAmt) & 0x7f) | ((RotAmt + 8) << 7);

    return -1;
  }
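  // For example, 0x00000FF0 is 0xFF rotated right by 28, so
  // getT2SOImmValRotateVal(0xFF0) returns 0xE7F: the rotate amount 28 in the
  // top five bits and the low seven bits of 0xFF (the leading one is implicit).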

  /// getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit
  /// into a Thumb-2 shifter_operand immediate operand, return the 12-bit
  /// encoding for it.  If not, return -1.
  /// See ARM Reference Manual A6.3.2.
  static inline int getT2SOImmVal(unsigned Arg) {
    // If 'Arg' is an 8-bit splat, then get the encoded value.
    int Splat = getT2SOImmValSplatVal(Arg);
    if (Splat != -1)
      return Splat;

    // If 'Arg' can be handled with a single shifter_op return the value.
    int Rot = getT2SOImmValRotateVal(Arg);
    if (Rot != -1)
      return Rot;

    return -1;
  }

  static inline unsigned getT2SOImmValRotate(unsigned V) {
    if ((V & ~255U) == 0) return 0;
    // Use CTZ to compute the rotate amount.
    unsigned RotAmt = CountTrailingZeros_32(V);
    return (32 - RotAmt) & 31;
  }

  static inline bool isT2SOImmTwoPartVal (unsigned Imm) {
    unsigned V = Imm;
    // Passing values can be any combination of splat values and shifter
    // values. If this can be handled with a single shifter or splat, bail
    // out. Those should be handled directly, not with a two-part val.
    if (getT2SOImmValSplatVal(V) != -1)
      return false;
    V = rotr32 (~255U, getT2SOImmValRotate(V)) & V;
    if (V == 0)
      return false;

    // If this can be handled as an immediate, accept.
    if (getT2SOImmVal(V) != -1) return true;

    // Likewise, try masking out a splat value first.
    V = Imm;
    if (getT2SOImmValSplatVal(V & 0xff00ff00U) != -1)
      V &= ~0xff00ff00U;
    else if (getT2SOImmValSplatVal(V & 0x00ff00ffU) != -1)
      V &= ~0x00ff00ffU;
    // If what's left can be handled as an immediate, accept.
    if (getT2SOImmVal(V) != -1) return true;

    // Otherwise, do not accept.
    return false;
  }

  static inline unsigned getT2SOImmTwoPartFirst(unsigned Imm) {
    assert (isT2SOImmTwoPartVal(Imm) &&
            "Immediate cannot be encoded as two part immediate!");
    // Try a shifter operand as one part
    unsigned V = rotr32 (~255, getT2SOImmValRotate(Imm)) & Imm;
    // If the rest is encodable as an immediate, then return it.
    if (getT2SOImmVal(V) != -1) return V;

    // Try masking out a splat value first.
    if (getT2SOImmValSplatVal(Imm & 0xff00ff00U) != -1)
      return Imm & 0xff00ff00U;

    // The other splat is all that's left as an option.
    assert (getT2SOImmValSplatVal(Imm & 0x00ff00ffU) != -1);
    return Imm & 0x00ff00ffU;
  }

  static inline unsigned getT2SOImmTwoPartSecond(unsigned Imm) {
    // Mask out the first hunk
    Imm ^= getT2SOImmTwoPartFirst(Imm);
    // Return what's left
    assert (getT2SOImmVal(Imm) != -1 &&
            "Unable to encode second part of T2 two part SO immediate");
    return Imm;
  }
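  // For example, 0xFF000001 is not a single T2 immediate, but
  // getT2SOImmTwoPartFirst(0xFF000001) == 0xFF000000 and
  // getT2SOImmTwoPartSecond(0xFF000001) == 0x00000001, and each part is
  // encodable by getT2SOImmVal().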

  //===--------------------------------------------------------------------===//
  // Addressing Mode #2
  //===--------------------------------------------------------------------===//
  //
  // This is used for most simple load/store instructions.
  //
  // addrmode2 := reg +/- reg shop imm
  // addrmode2 := reg +/- imm12
  //
  // The first operand is always a Reg.  The second operand is a reg if in
  // reg/reg form, otherwise it's reg#0.  The third field encodes the operation
  // in bit 12, the immediate in bits 0-11, and the shift op in 13-15.
  //
  // If this addressing mode is a frame index (before prolog/epilog insertion
  // and code rewriting), this operand will have the form: FI#, reg0, <offs>
  // with no shift amount for the frame offset.
  //
  static inline unsigned getAM2Opc(AddrOpc Opc, unsigned Imm12, ShiftOpc SO) {
    assert(Imm12 < (1 << 12) && "Imm too large!");
    bool isSub = Opc == sub;
    return Imm12 | ((int)isSub << 12) | (SO << 13);
  }
  static inline unsigned getAM2Offset(unsigned AM2Opc) {
    return AM2Opc & ((1 << 12)-1);
  }
  static inline AddrOpc getAM2Op(unsigned AM2Opc) {
    return ((AM2Opc >> 12) & 1) ? sub : add;
  }
  static inline ShiftOpc getAM2ShiftOpc(unsigned AM2Opc) {
    return (ShiftOpc)(AM2Opc >> 13);
  }
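  // For example, getAM2Opc(sub, 16, ARM_AM::lsl) == 0x5010; the accessors
  // above recover the offset 16, the 'sub' operation, and the lsl shift
  // opcode from that value.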

  //===--------------------------------------------------------------------===//
  // Addressing Mode #3
  //===--------------------------------------------------------------------===//
  //
  // This is used for sign-extending loads, and load/store-pair instructions.
  //
  // addrmode3 := reg +/- reg
  // addrmode3 := reg +/- imm8
  //
  // The first operand is always a Reg.  The second operand is a reg if in
  // reg/reg form, otherwise it's reg#0.  The third field encodes the operation
  // in bit 8 and the immediate in bits 0-7.

  /// getAM3Opc - This function encodes the addrmode3 opc field.
  static inline unsigned getAM3Opc(AddrOpc Opc, unsigned char Offset) {
    bool isSub = Opc == sub;
    return ((int)isSub << 8) | Offset;
  }
  static inline unsigned char getAM3Offset(unsigned AM3Opc) {
    return AM3Opc & 0xFF;
  }
  static inline AddrOpc getAM3Op(unsigned AM3Opc) {
    return ((AM3Opc >> 8) & 1) ? sub : add;
  }
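  // For example, getAM3Opc(sub, 4) == 0x104, from which getAM3Op() recovers
  // 'sub' and getAM3Offset() recovers 4.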

  //===--------------------------------------------------------------------===//
  // Addressing Mode #4
  //===--------------------------------------------------------------------===//
  //
  // This is used for load / store multiple instructions.
  //
  // addrmode4 := reg, <mode>
  //
  // The four modes are:
  //    IA - Increment after
  //    IB - Increment before
  //    DA - Decrement after
  //    DB - Decrement before

  static inline AMSubMode getAM4SubMode(unsigned Mode) {
    return (AMSubMode)(Mode & 0x7);
  }

  static inline unsigned getAM4ModeImm(AMSubMode SubMode) {
    return (int)SubMode;
  }

  //===--------------------------------------------------------------------===//
  // Addressing Mode #5
  //===--------------------------------------------------------------------===//
  //
  // This is used for coprocessor instructions, such as FP load/stores.
  //
  // addrmode5 := reg +/- imm8*4
  //
  // The first operand is always a Reg.  The second operand encodes the
  // operation in bit 8 and the immediate in bits 0-7.
  //
  // This is also used for FP load/store multiple ops.  The second operand
  // encodes the number of registers (or 2 times the number of registers
  // for DPR ops) in bits 0-7.  In addition, bits 8-10 encode one of the
  // following two sub-modes:
  //
  //    IA - Increment after
  //    DB - Decrement before

  /// getAM5Opc - This function encodes the addrmode5 opc field.
  static inline unsigned getAM5Opc(AddrOpc Opc, unsigned char Offset) {
    bool isSub = Opc == sub;
    return ((int)isSub << 8) | Offset;
  }
  static inline unsigned char getAM5Offset(unsigned AM5Opc) {
    return AM5Opc & 0xFF;
  }
  static inline AddrOpc getAM5Op(unsigned AM5Opc) {
    return ((AM5Opc >> 8) & 1) ? sub : add;
  }

  /// getAM5Opc - This function encodes the addrmode5 opc field for VLDM and
  /// VSTM instructions.
  static inline unsigned getAM5Opc(AMSubMode SubMode, unsigned char Offset) {
    assert((SubMode == ia || SubMode == db) &&
           "Illegal addressing mode 5 sub-mode!");
    return ((int)SubMode << 8) | Offset;
  }
  static inline AMSubMode getAM5SubMode(unsigned AM5Opc) {
    return (AMSubMode)((AM5Opc >> 8) & 0x7);
  }
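  // For example, a VLDM/VSTM using the 'db' sub-mode that transfers two DPR
  // registers (offset 4, i.e. 2 * #regs) encodes as getAM5Opc(db, 4) == 0x404,
  // and getAM5SubMode(0x404) == db.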

  //===--------------------------------------------------------------------===//
  // Addressing Mode #6
  //===--------------------------------------------------------------------===//
  //
  // This is used for NEON load / store instructions.
  //
  // addrmode6 := reg with optional writeback and alignment
  //
  // This is stored in four operands [regaddr, regupdate, opc, align].  The
  // first is the address register.  The second register holds the value of
  // a post-access increment for writeback or reg0 if no writeback or if the
  // writeback increment is the size of the memory access.  The third
  // operand encodes whether there is writeback to the address register.  The
  // fourth operand is the value of the alignment specifier to use or zero if
  // no explicit alignment.

  static inline unsigned getAM6Opc(bool WB = false) {
    return (int)WB;
  }

  static inline bool getAM6WBFlag(unsigned Mode) {
    return Mode & 1;
  }

} // end namespace ARM_AM
} // end namespace llvm

#endif