// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <limits.h>  // For LONG_MIN, LONG_MAX.

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "bootstrapper.h"
#include "codegen-inl.h"
#include "debug.h"
#include "runtime.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(void* buffer, int size)
    : Assembler(buffer, size),
      generating_stub_(false),
      allow_stub_calls_(true),
      code_object_(Heap::undefined_value()) {
}


// We always generate arm code, never thumb code, even if V8 is compiled to
// thumb, so we require inter-working support
#if defined(__thumb__) && !defined(USE_THUMB_INTERWORK)
#error "flag -mthumb-interwork missing"
#endif


// We do not support thumb inter-working with an arm architecture not supporting
// the blx instruction (below v5t).  If you know what CPU you are compiling for
// you can use -march=armv7 or similar.
#if defined(USE_THUMB_INTERWORK) && !defined(CAN_USE_THUMB_INSTRUCTIONS)
# error "For thumb inter-working we require an architecture which supports blx"
#endif


// Using bx does not yield better code, so use it only when required
#if defined(USE_THUMB_INTERWORK)
#define USE_BX 1
#endif


void MacroAssembler::Jump(Register target, Condition cond) {
#if USE_BX
  bx(target, cond);
#else
  mov(pc, Operand(target), LeaveCC, cond);
#endif
}


void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond) {
#if USE_BX
  mov(ip, Operand(target, rmode), LeaveCC, cond);
  bx(ip, cond);
#else
  mov(pc, Operand(target, rmode), LeaveCC, cond);
#endif
}


void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode,
                          Condition cond) {
  ASSERT(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
}


void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  // 'code' is always generated ARM code, never THUMB code
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}


void MacroAssembler::Call(Register target, Condition cond) {
#if USE_BLX
  blx(target, cond);
#else
  // set lr for return at current pc + 8
  mov(lr, Operand(pc), LeaveCC, cond);
  mov(pc, Operand(target), LeaveCC, cond);
#endif
}


void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond) {
#if USE_BLX
  // On ARMv5 and after the recommended call sequence is:
  //  ldr ip, [pc, #...]
  //  blx ip

  // The two instructions (ldr and blx) could be separated by a constant
  // pool and the code would still work. The issue comes from the
  // patching code, which expects the ldr to be just above the blx.
  { BlockConstPoolScope block_const_pool(this);
    // Statement positions are expected to be recorded when the target
    // address is loaded. The mov method will automatically record
    // positions when pc is the target. Since this is not the case here,
    // we have to do it explicitly.
    positions_recorder()->WriteRecordedPositions();

    mov(ip, Operand(target, rmode), LeaveCC, cond);
    blx(ip, cond);
  }

  ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
#else
  // Set lr for return at current pc + 8.
  mov(lr, Operand(pc), LeaveCC, cond);
  // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
  mov(pc, Operand(target, rmode), LeaveCC, cond);

  ASSERT(kCallTargetAddressOffset == kInstrSize);
#endif
}


void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode,
                          Condition cond) {
  ASSERT(!RelocInfo::IsCodeTarget(rmode));
  Call(reinterpret_cast<intptr_t>(target), rmode, cond);
}


void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  // 'code' is always generated ARM code, never THUMB code
  Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}


void MacroAssembler::Ret(Condition cond) {
#if USE_BX
  bx(lr, cond);
#else
  mov(pc, Operand(lr), LeaveCC, cond);
#endif
}


void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
  LoadRoot(ip, Heap::kStackLimitRootIndex);
  cmp(sp, Operand(ip));
  b(lo, on_stack_overflow);
}


void MacroAssembler::Drop(int count, Condition cond) {
  if (count > 0) {
    add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
  }
}


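// Swap the contents of two registers. If no scratch register is available the
// swap falls back to three exclusive-or operations, so no temporary is needed.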
void MacroAssembler::Swap(Register reg1,
                          Register reg2,
                          Register scratch,
                          Condition cond) {
  if (scratch.is(no_reg)) {
    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
    eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
  } else {
    mov(scratch, reg1, LeaveCC, cond);
    mov(reg1, reg2, LeaveCC, cond);
    mov(reg2, scratch, LeaveCC, cond);
  }
}


void MacroAssembler::Call(Label* target) {
  bl(target);
}


void MacroAssembler::Move(Register dst, Handle<Object> value) {
  mov(dst, Operand(value));
}


void MacroAssembler::Move(Register dst, Register src) {
  if (!dst.is(src)) {
    mov(dst, src);
  }
}


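// And() chooses the cheapest encoding for the mask: a plain mov for a zero
// immediate, an ARMv7 ubfx bit-field extract when the immediate has the form
// 2^n - 1, and an ordinary and_ otherwise.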
void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
                         Condition cond) {
  if (!src2.is_reg() &&
      !src2.must_use_constant_pool() &&
      src2.immediate() == 0) {
    mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond);

  } else if (!src2.is_single_instruction() &&
             !src2.must_use_constant_pool() &&
             CpuFeatures::IsSupported(ARMv7) &&
             IsPowerOf2(src2.immediate() + 1)) {
    ubfx(dst, src1, 0, WhichPowerOf2(src2.immediate() + 1), cond);

  } else {
    and_(dst, src1, src2, LeaveCC, cond);
  }
}


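// Ubfx, Sbfx and Bfc below fall back to mask-and-shift sequences when the
// ARMv7 bit-field instructions are not available.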
void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
                          Condition cond) {
  ASSERT(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7)) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    and_(dst, src1, Operand(mask), LeaveCC, cond);
    if (lsb != 0) {
      mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
    }
  } else {
    ubfx(dst, src1, lsb, width, cond);
  }
}


void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
                          Condition cond) {
  ASSERT(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7)) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    and_(dst, src1, Operand(mask), LeaveCC, cond);
    int shift_up = 32 - lsb - width;
    int shift_down = lsb + shift_up;
    if (shift_up != 0) {
      mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
    }
    if (shift_down != 0) {
      mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
    }
  } else {
    sbfx(dst, src1, lsb, width, cond);
  }
}


void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
  ASSERT(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7)) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, dst, Operand(mask));
  } else {
    bfc(dst, lsb, width, cond);
  }
}


void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
                          Condition cond) {
  if (!CpuFeatures::IsSupported(ARMv7)) {
    ASSERT(!dst.is(pc) && !src.rm().is(pc));
    ASSERT((satpos >= 0) && (satpos <= 31));

    // These asserts are required to ensure compatibility with the ARMv7
    // implementation.
    ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL));
    ASSERT(src.rs().is(no_reg));

    Label done;
    int satval = (1 << satpos) - 1;

    if (cond != al) {
      b(NegateCondition(cond), &done);  // Skip saturate if !condition.
    }
    if (!(src.is_reg() && dst.is(src.rm()))) {
      mov(dst, src);
    }
    tst(dst, Operand(~satval));
    b(eq, &done);
    mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, mi);  // 0 if negative.
    mov(dst, Operand(satval), LeaveCC, pl);  // satval if positive.
    bind(&done);
  } else {
    usat(dst, satpos, src, cond);
  }
}


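// Emit a jump table indexed by a smi: the (scaled) index is added to pc so
// execution lands in the table of unconditional branches that follows.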
void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
  // Empty the const pool.
  CheckConstPool(true, true);
  add(pc, pc, Operand(index,
                      LSL,
                      assembler::arm::Instr::kInstrSizeLog2 - kSmiTagSize));
  BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize);
  nop();  // Jump table alignment.
  for (int i = 0; i < targets.length(); i++) {
    b(targets[i]);
  }
}


void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Condition cond) {
  ldr(destination, MemOperand(roots, index << kPointerSizeLog2), cond);
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index,
                               Condition cond) {
  str(source, MemOperand(roots, index << kPointerSizeLog2), cond);
}


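// Mark the page region that contains 'address' as dirty: compute the page
// start from 'object', the region number from 'address', and set the matching
// bit in the page's dirty flags. Clobbers 'object', 'address', 'scratch' and
// ip.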
void MacroAssembler::RecordWriteHelper(Register object,
                                       Register address,
                                       Register scratch) {
  if (FLAG_debug_code) {
    // Check that the object is not in new space.
    Label not_in_new_space;
    InNewSpace(object, scratch, ne, &not_in_new_space);
    Abort("new-space object passed to RecordWriteHelper");
    bind(&not_in_new_space);
  }

  // Calculate page address.
  Bfc(object, 0, kPageSizeBits);

  // Calculate region number.
  Ubfx(address, address, Page::kRegionSizeLog2,
       kPageSizeBits - Page::kRegionSizeLog2);

  // Mark region dirty.
  ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
  mov(ip, Operand(1));
  orr(scratch, scratch, Operand(ip, LSL, address));
  str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
}


void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cc,
                                Label* branch) {
  ASSERT(cc == eq || cc == ne);
  and_(scratch, object, Operand(ExternalReference::new_space_mask()));
  cmp(scratch, Operand(ExternalReference::new_space_start()));
  b(cc, branch);
}


// Will clobber 4 registers: object, offset, scratch, ip.  The
// register 'object' contains a heap object pointer.  The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
                                 Operand offset,
                                 Register scratch0,
                                 Register scratch1) {
  // The compiled code assumes that record write doesn't change the
  // context register, so we check that none of the clobbered
  // registers are cp.
  ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));

  Label done;

  // First, test that the object is not in the new space.  We cannot set
  // region marks for new space pages.
  InNewSpace(object, scratch0, eq, &done);

  // Add offset into the object.
  add(scratch0, object, offset);

  // Record the actual write.
  RecordWriteHelper(object, scratch0, scratch1);

  bind(&done);

  // Clobber all input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (FLAG_debug_code) {
    mov(object, Operand(BitCast<int32_t>(kZapValue)));
    mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
    mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
  }
}


// Will clobber 4 registers: object, address, scratch, ip.  The
// register 'object' contains a heap object pointer.  The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
                                 Register address,
                                 Register scratch) {
  // The compiled code assumes that record write doesn't change the
  // context register, so we check that none of the clobbered
  // registers are cp.
  ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));

  Label done;

  // First, test that the object is not in the new space.  We cannot set
  // region marks for new space pages.
  InNewSpace(object, scratch, eq, &done);

  // Record the actual write.
  RecordWriteHelper(object, address, scratch);

  bind(&done);

  // Clobber all input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (FLAG_debug_code) {
    mov(object, Operand(BitCast<int32_t>(kZapValue)));
    mov(address, Operand(BitCast<int32_t>(kZapValue)));
    mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
  }
}


void MacroAssembler::Ldrd(Register dst1, Register dst2,
                          const MemOperand& src, Condition cond) {
  ASSERT(src.rm().is(no_reg));
  ASSERT(!dst1.is(lr));  // r14.
  ASSERT_EQ(0, dst1.code() % 2);
  ASSERT_EQ(dst1.code() + 1, dst2.code());

  // Generate two ldr instructions if ldrd is not available.
  if (CpuFeatures::IsSupported(ARMv7)) {
    CpuFeatures::Scope scope(ARMv7);
    ldrd(dst1, dst2, src, cond);
  } else {
    MemOperand src2(src);
    src2.set_offset(src2.offset() + 4);
    if (dst1.is(src.rn())) {
      ldr(dst2, src2, cond);
      ldr(dst1, src, cond);
    } else {
      ldr(dst1, src, cond);
      ldr(dst2, src2, cond);
    }
  }
}


void MacroAssembler::Strd(Register src1, Register src2,
                          const MemOperand& dst, Condition cond) {
  ASSERT(dst.rm().is(no_reg));
  ASSERT(!src1.is(lr));  // r14.
  ASSERT_EQ(0, src1.code() % 2);
  ASSERT_EQ(src1.code() + 1, src2.code());

  // Generate two str instructions if strd is not available.
  if (CpuFeatures::IsSupported(ARMv7)) {
    CpuFeatures::Scope scope(ARMv7);
    strd(src1, src2, dst, cond);
  } else {
    MemOperand dst2(dst);
    dst2.set_offset(dst2.offset() + 4);
    str(src1, dst, cond);
    str(src2, dst2, cond);
  }
}


void MacroAssembler::EnterFrame(StackFrame::Type type) {
  // r0-r3: preserved
  stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
  mov(ip, Operand(Smi::FromInt(type)));
  push(ip);
  mov(ip, Operand(CodeObject()));
  push(ip);
  add(fp, sp, Operand(3 * kPointerSize));  // Adjust FP to point to saved FP.
}


void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  // r0: preserved
  // r1: preserved
  // r2: preserved

  // Drop the execution stack down to the frame pointer and restore
  // the caller frame pointer and return address.
  mov(sp, fp);
  ldm(ia_w, sp, fp.bit() | lr.bit());
}


void MacroAssembler::EnterExitFrame() {
  // Compute the argv pointer and keep it in a callee-saved register.
  // r0 is argc.
  add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
  sub(r6, r6, Operand(kPointerSize));

  // Compute callee's stack pointer before making changes and save it as
  // ip register so that it is restored as sp register on exit, thereby
  // popping the args.

  // ip = sp + kPointerSize * #args;
  add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));

  // Prepare the stack to be aligned when calling into C. After this point there
  // are 5 pushes before the call into C, so the stack needs to be aligned after
  // 5 pushes.
  int frame_alignment = ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (frame_alignment != kPointerSize) {
    // The following code needs to be more general if this assert does not hold.
    ASSERT(frame_alignment == 2 * kPointerSize);
    // With 5 pushes left the frame must be unaligned at this point.
    mov(r7, Operand(Smi::FromInt(0)));
    tst(sp, Operand((frame_alignment - kPointerSize) & frame_alignment_mask));
    push(r7, eq);  // Push if aligned to make it unaligned.
  }

  // Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
  stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
  mov(fp, Operand(sp));  // Setup new frame pointer.

  mov(ip, Operand(CodeObject()));
  push(ip);  // Accessed from ExitFrame::code_slot.

  // Save the frame pointer and the context in top.
  mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
  str(fp, MemOperand(ip));
  mov(ip, Operand(ExternalReference(Top::k_context_address)));
  str(cp, MemOperand(ip));

  // Setup argc and the builtin function in callee-saved registers.
  mov(r4, Operand(r0));
  mov(r5, Operand(r1));
}


void MacroAssembler::InitializeNewString(Register string,
                                         Register length,
                                         Heap::RootListIndex map_index,
                                         Register scratch1,
                                         Register scratch2) {
  mov(scratch1, Operand(length, LSL, kSmiTagSize));
  LoadRoot(scratch2, map_index);
  str(scratch1, FieldMemOperand(string, String::kLengthOffset));
  mov(scratch1, Operand(String::kEmptyHashField));
  str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
  str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
}


int MacroAssembler::ActivationFrameAlignment() {
#if defined(V8_HOST_ARCH_ARM)
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one ARM
  // platform for another ARM platform with a different alignment.
  return OS::ActivationFrameAlignment();
#else  // defined(V8_HOST_ARCH_ARM)
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif  // defined(V8_HOST_ARCH_ARM)
}


void MacroAssembler::LeaveExitFrame() {
  // Clear top frame.
  mov(r3, Operand(0, RelocInfo::NONE));
  mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
  str(r3, MemOperand(ip));

  // Restore current context from top and clear it in debug mode.
  mov(ip, Operand(ExternalReference(Top::k_context_address)));
  ldr(cp, MemOperand(ip));
#ifdef DEBUG
  str(r3, MemOperand(ip));
#endif

  // Pop the arguments, restore registers, and return.
  mov(sp, Operand(fp));  // respect ABI stack constraint
  ldm(ia, sp, fp.bit() | sp.bit() | pc.bit());
}


void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    Register code_reg,
                                    Label* done,
                                    InvokeFlag flag) {
  bool definitely_matches = false;
  Label regular_invoke;

  // Check whether the expected and actual arguments count match. If not,
  // setup registers according to contract with ArgumentsAdaptorTrampoline:
  //  r0: actual arguments count
  //  r1: function (passed through to callee)
  //  r2: expected arguments count
  //  r3: callee code entry

  // The code below is made a lot easier because the calling code already sets
  // up actual and expected registers according to the contract if values are
  // passed in registers.
  ASSERT(actual.is_immediate() || actual.reg().is(r0));
  ASSERT(expected.is_immediate() || expected.reg().is(r2));
  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));

  if (expected.is_immediate()) {
    ASSERT(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      mov(r0, Operand(actual.immediate()));
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaption code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        mov(r2, Operand(expected.immediate()));
      }
    }
  } else {
    if (actual.is_immediate()) {
      cmp(expected.reg(), Operand(actual.immediate()));
      b(eq, &regular_invoke);
      mov(r0, Operand(actual.immediate()));
    } else {
      cmp(expected.reg(), Operand(actual.reg()));
      b(eq, &regular_invoke);
    }
  }

  if (!definitely_matches) {
    if (!code_constant.is_null()) {
      mov(r3, Operand(code_constant));
      add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
    }

    Handle<Code> adaptor =
        Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
    if (flag == CALL_FUNCTION) {
      Call(adaptor, RelocInfo::CODE_TARGET);
      b(done);
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&regular_invoke);
  }
}


void MacroAssembler::InvokeCode(Register code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                InvokeFlag flag) {
  Label done;

  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
  if (flag == CALL_FUNCTION) {
    Call(code);
  } else {
    ASSERT(flag == JUMP_FUNCTION);
    Jump(code);
  }

  // Continue here if InvokePrologue does handle the invocation due to
  // mismatched parameter counts.
  bind(&done);
}


void MacroAssembler::InvokeCode(Handle<Code> code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                RelocInfo::Mode rmode,
                                InvokeFlag flag) {
  Label done;

  InvokePrologue(expected, actual, code, no_reg, &done, flag);
  if (flag == CALL_FUNCTION) {
    Call(code, rmode);
  } else {
    Jump(code, rmode);
  }

  // Continue here if InvokePrologue does handle the invocation due to
  // mismatched parameter counts.
  bind(&done);
}


void MacroAssembler::InvokeFunction(Register fun,
                                    const ParameterCount& actual,
                                    InvokeFlag flag) {
  // Contract with called JS functions requires that function is passed in r1.
  ASSERT(fun.is(r1));

  Register expected_reg = r2;
  Register code_reg = r3;

  ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
  ldr(expected_reg,
      FieldMemOperand(code_reg,
                      SharedFunctionInfo::kFormalParameterCountOffset));
  mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
  ldr(code_reg,
      FieldMemOperand(r1, JSFunction::kCodeEntryOffset));

  ParameterCount expected(expected_reg);
  InvokeCode(code_reg, expected, actual, flag);
}


void MacroAssembler::InvokeFunction(JSFunction* function,
                                    const ParameterCount& actual,
                                    InvokeFlag flag) {
  ASSERT(function->is_compiled());

  // Get the function and setup the context.
  mov(r1, Operand(Handle<JSFunction>(function)));
  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

  // Invoke the cached code.
  Handle<Code> code(function->code());
  ParameterCount expected(function->shared()->formal_parameter_count());
  InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
}


#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
  ASSERT(allow_stub_calls());
  mov(r0, Operand(0, RelocInfo::NONE));
  mov(r1, Operand(ExternalReference(Runtime::kDebugBreak)));
  CEntryStub ces(1);
  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
#endif


void MacroAssembler::PushTryHandler(CodeLocation try_location,
                                    HandlerType type) {
  // Adjust this code if not the case.
  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
  // The pc (return address) is passed in register lr.
  if (try_location == IN_JAVASCRIPT) {
    if (type == TRY_CATCH_HANDLER) {
      mov(r3, Operand(StackHandler::TRY_CATCH));
    } else {
      mov(r3, Operand(StackHandler::TRY_FINALLY));
    }
    ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
           && StackHandlerConstants::kFPOffset == 2 * kPointerSize
           && StackHandlerConstants::kPCOffset == 3 * kPointerSize);
    stm(db_w, sp, r3.bit() | fp.bit() | lr.bit());
    // Save the current handler as the next handler.
    mov(r3, Operand(ExternalReference(Top::k_handler_address)));
    ldr(r1, MemOperand(r3));
    ASSERT(StackHandlerConstants::kNextOffset == 0);
    push(r1);
    // Link this handler as the new current one.
    str(sp, MemOperand(r3));
  } else {
    // Must preserve r0-r4, r5-r7 are available.
    ASSERT(try_location == IN_JS_ENTRY);
    // The frame pointer does not point to a JS frame so we save NULL
    // for fp. We expect the code throwing an exception to check fp
    // before dereferencing it to restore the context.
    mov(ip, Operand(0, RelocInfo::NONE));  // To save a NULL frame pointer.
    mov(r6, Operand(StackHandler::ENTRY));
    ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
           && StackHandlerConstants::kFPOffset == 2 * kPointerSize
           && StackHandlerConstants::kPCOffset == 3 * kPointerSize);
    stm(db_w, sp, r6.bit() | ip.bit() | lr.bit());
    // Save the current handler as the next handler.
    mov(r7, Operand(ExternalReference(Top::k_handler_address)));
    ldr(r6, MemOperand(r7));
    ASSERT(StackHandlerConstants::kNextOffset == 0);
    push(r6);
    // Link this handler as the new current one.
    str(sp, MemOperand(r7));
  }
}


void MacroAssembler::PopTryHandler() {
  ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
  pop(r1);
  mov(ip, Operand(ExternalReference(Top::k_handler_address)));
  add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
  str(r1, MemOperand(ip));
}


void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch,
                                            Label* miss) {
  Label same_contexts;

  ASSERT(!holder_reg.is(scratch));
  ASSERT(!holder_reg.is(ip));
  ASSERT(!scratch.is(ip));

  // Load current lexical context from the stack frame.
  ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
  // In debug mode, make sure the lexical context is set.
#ifdef DEBUG
  cmp(scratch, Operand(0, RelocInfo::NONE));
  Check(ne, "we should not have an empty lexical context");
#endif

  // Load the global context of the current context.
  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
  ldr(scratch, FieldMemOperand(scratch, offset));
  ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));

  // Check the context is a global context.
  if (FLAG_debug_code) {
    // TODO(119): avoid push(holder_reg)/pop(holder_reg)
    // Cannot use ip as a temporary in this verification code, because
    // ip is clobbered as part of cmp with an object Operand.
    push(holder_reg);  // Temporarily save holder on the stack.
    // Read the first word and compare to the global_context_map.
    ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
    LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
    cmp(holder_reg, ip);
    Check(eq, "JSGlobalObject::global_context should be a global context.");
    pop(holder_reg);  // Restore holder.
  }

  // Check if both contexts are the same.
  ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
  cmp(scratch, Operand(ip));
  b(eq, &same_contexts);

  // Check the context is a global context.
  if (FLAG_debug_code) {
    // TODO(119): avoid push(holder_reg)/pop(holder_reg)
    // Cannot use ip as a temporary in this verification code, because
    // ip is clobbered as part of cmp with an object Operand.
    push(holder_reg);  // Temporarily save holder on the stack.
    mov(holder_reg, ip);  // Move ip to its holding place.
    LoadRoot(ip, Heap::kNullValueRootIndex);
    cmp(holder_reg, ip);
    Check(ne, "JSGlobalProxy::context() should not be null.");

    ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
    LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
    cmp(holder_reg, ip);
    Check(eq, "JSGlobalObject::global_context should be a global context.");
    // Restoring ip is not needed. ip is reloaded below.
    pop(holder_reg);  // Restore holder.
    // Restore ip to holder's context.
    ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
  }

  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.
  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;

  ldr(scratch, FieldMemOperand(scratch, token_offset));
  ldr(ip, FieldMemOperand(ip, token_offset));
  cmp(scratch, Operand(ip));
  b(ne, miss);

  bind(&same_contexts);
}


void MacroAssembler::AllocateInNewSpace(int object_size,
                                        Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Label* gc_required,
                                        AllocationFlags flags) {
  if (!FLAG_inline_new) {
    if (FLAG_debug_code) {
      // Trash the registers to simulate an allocation failure.
      mov(result, Operand(0x7091));
      mov(scratch1, Operand(0x7191));
      mov(scratch2, Operand(0x7291));
    }
    jmp(gc_required);
    return;
  }

  ASSERT(!result.is(scratch1));
  ASSERT(!scratch1.is(scratch2));

  // Make object size into bytes.
  if ((flags & SIZE_IN_WORDS) != 0) {
    object_size *= kPointerSize;
  }
  ASSERT_EQ(0, object_size & kObjectAlignmentMask);

  // Load address of new object into result and allocation top address into
  // scratch1.
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address();
  mov(scratch1, Operand(new_space_allocation_top));
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    ldr(result, MemOperand(scratch1));
  } else if (FLAG_debug_code) {
    // Assert that result actually contains top on entry. scratch2 is used
    // immediately below so this use of scratch2 does not cause difference with
    // respect to register content between debug and release mode.
    ldr(scratch2, MemOperand(scratch1));
    cmp(result, scratch2);
    Check(eq, "Unexpected allocation top");
  }

  // Calculate new top and bail out if new space is exhausted. Use result
  // to calculate the new top.
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address();
  mov(scratch2, Operand(new_space_allocation_limit));
  ldr(scratch2, MemOperand(scratch2));
  add(result, result, Operand(object_size));
  cmp(result, Operand(scratch2));
  b(hi, gc_required);
  str(result, MemOperand(scratch1));

  // Tag and adjust back to start of new object.
  if ((flags & TAG_OBJECT) != 0) {
    sub(result, result, Operand(object_size - kHeapObjectTag));
  } else {
    sub(result, result, Operand(object_size));
  }
}


void MacroAssembler::AllocateInNewSpace(Register object_size,
                                        Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Label* gc_required,
                                        AllocationFlags flags) {
  if (!FLAG_inline_new) {
    if (FLAG_debug_code) {
      // Trash the registers to simulate an allocation failure.
      mov(result, Operand(0x7091));
      mov(scratch1, Operand(0x7191));
      mov(scratch2, Operand(0x7291));
    }
    jmp(gc_required);
    return;
  }

  ASSERT(!result.is(scratch1));
  ASSERT(!scratch1.is(scratch2));

  // Load address of new object into result and allocation top address into
  // scratch1.
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address();
  mov(scratch1, Operand(new_space_allocation_top));
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    ldr(result, MemOperand(scratch1));
  } else if (FLAG_debug_code) {
    // Assert that result actually contains top on entry. scratch2 is used
    // immediately below so this use of scratch2 does not cause difference with
    // respect to register content between debug and release mode.
    ldr(scratch2, MemOperand(scratch1));
    cmp(result, scratch2);
    Check(eq, "Unexpected allocation top");
  }

  // Calculate new top and bail out if new space is exhausted. Use result
  // to calculate the new top. Object size is in words so a shift is required to
  // get the number of bytes.
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address();
  mov(scratch2, Operand(new_space_allocation_limit));
  ldr(scratch2, MemOperand(scratch2));
  if ((flags & SIZE_IN_WORDS) != 0) {
    add(result, result, Operand(object_size, LSL, kPointerSizeLog2));
  } else {
    add(result, result, Operand(object_size));
  }
  cmp(result, Operand(scratch2));
  b(hi, gc_required);

  // Update allocation top. result temporarily holds the new top.
  if (FLAG_debug_code) {
    tst(result, Operand(kObjectAlignmentMask));
    Check(eq, "Unaligned allocation in new space");
  }
  str(result, MemOperand(scratch1));

  // Adjust back to start of new object.
  if ((flags & SIZE_IN_WORDS) != 0) {
    sub(result, result, Operand(object_size, LSL, kPointerSizeLog2));
  } else {
    sub(result, result, Operand(object_size));
  }

  // Tag object if requested.
  if ((flags & TAG_OBJECT) != 0) {
    add(result, result, Operand(kHeapObjectTag));
  }
}


void MacroAssembler::UndoAllocationInNewSpace(Register object,
                                              Register scratch) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address();

  // Make sure the object has no tag before resetting top.
  and_(object, object, Operand(~kHeapObjectTagMask));
#ifdef DEBUG
  // Check that the object un-allocated is below the current top.
  mov(scratch, Operand(new_space_allocation_top));
  ldr(scratch, MemOperand(scratch));
  cmp(object, scratch);
  Check(lt, "Undo allocation of non allocated memory");
#endif
  // Write the address of the object to un-allocate as the current top.
  mov(scratch, Operand(new_space_allocation_top));
  str(object, MemOperand(scratch));
}


void MacroAssembler::AllocateTwoByteString(Register result,
                                           Register length,
                                           Register scratch1,
                                           Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string while
  // observing object alignment.
  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  mov(scratch1, Operand(length, LSL, 1));  // Length in bytes, not chars.
  add(scratch1, scratch1,
      Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
  and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));

  // Allocate two-byte string in new space.
  AllocateInNewSpace(scratch1,
                     result,
                     scratch2,
                     scratch3,
                     gc_required,
                     TAG_OBJECT);

  // Set the map, length and hash field.
  InitializeNewString(result,
                      length,
                      Heap::kStringMapRootIndex,
                      scratch1,
                      scratch2);
}


void MacroAssembler::AllocateAsciiString(Register result,
                                         Register length,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
                                         Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string while
  // observing object alignment.
  ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
  ASSERT(kCharSize == 1);
  add(scratch1, length,
      Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize));
  and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));

  // Allocate ASCII string in new space.
  AllocateInNewSpace(scratch1,
                     result,
                     scratch2,
                     scratch3,
                     gc_required,
                     TAG_OBJECT);

  // Set the map, length and hash field.
  InitializeNewString(result,
                      length,
                      Heap::kAsciiStringMapRootIndex,
                      scratch1,
                      scratch2);
}


void MacroAssembler::AllocateTwoByteConsString(Register result,
                                               Register length,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  AllocateInNewSpace(ConsString::kSize,
                     result,
                     scratch1,
                     scratch2,
                     gc_required,
                     TAG_OBJECT);

  InitializeNewString(result,
                      length,
                      Heap::kConsStringMapRootIndex,
                      scratch1,
                      scratch2);
}


void MacroAssembler::AllocateAsciiConsString(Register result,
                                             Register length,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* gc_required) {
  AllocateInNewSpace(ConsString::kSize,
                     result,
                     scratch1,
                     scratch2,
                     gc_required,
                     TAG_OBJECT);

  InitializeNewString(result,
                      length,
                      Heap::kConsAsciiStringMapRootIndex,
                      scratch1,
                      scratch2);
}


void MacroAssembler::CompareObjectType(Register object,
                                       Register map,
                                       Register type_reg,
                                       InstanceType type) {
  ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
  CompareInstanceType(map, type_reg, type);
}


void MacroAssembler::CompareInstanceType(Register map,
                                         Register type_reg,
                                         InstanceType type) {
  ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
  cmp(type_reg, Operand(type));
}


void MacroAssembler::CheckMap(Register obj,
                              Register scratch,
                              Handle<Map> map,
                              Label* fail,
                              bool is_heap_object) {
  if (!is_heap_object) {
    BranchOnSmi(obj, fail);
  }
  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  mov(ip, Operand(map));
  cmp(scratch, ip);
  b(ne, fail);
}


void MacroAssembler::CheckMap(Register obj,
                              Register scratch,
                              Heap::RootListIndex index,
                              Label* fail,
                              bool is_heap_object) {
  if (!is_heap_object) {
    BranchOnSmi(obj, fail);
  }
  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  LoadRoot(ip, index);
  cmp(scratch, ip);
  b(ne, fail);
}


void MacroAssembler::TryGetFunctionPrototype(Register function,
                                             Register result,
                                             Register scratch,
                                             Label* miss) {
  // Check that the receiver isn't a smi.
  BranchOnSmi(function, miss);

  // Check that the function really is a function.  Load map into result reg.
  CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
  b(ne, miss);

  // Make sure that the function has an instance prototype.
  Label non_instance;
  ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
  tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
  b(ne, &non_instance);

  // Get the prototype or initial map from the function.
  ldr(result,
      FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // If the prototype or initial map is the hole, don't return it and
  // simply miss the cache instead. This will allow us to allocate a
  // prototype object on-demand in the runtime system.
  LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  cmp(result, ip);
  b(eq, miss);

  // If the function does not have an initial map, we're done.
  Label done;
  CompareObjectType(result, scratch, scratch, MAP_TYPE);
  b(ne, &done);

  // Get the prototype from the initial map.
  ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
  jmp(&done);

  // Non-instance prototype: Fetch prototype from constructor field
  // in initial map.
  bind(&non_instance);
  ldr(result, FieldMemOperand(result, Map::kConstructorOffset));

  // All done.
  bind(&done);
}


void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
  ASSERT(allow_stub_calls());  // stub calls are not allowed in some stubs
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}


void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
  ASSERT(allow_stub_calls());  // stub calls are not allowed in some stubs
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}


void MacroAssembler::IllegalOperation(int num_arguments) {
  if (num_arguments > 0) {
    add(sp, sp, Operand(num_arguments * kPointerSize));
  }
  LoadRoot(r0, Heap::kUndefinedValueRootIndex);
}


void MacroAssembler::IndexFromHash(Register hash, Register index) {
  // If the hash field contains an array index pick it out. The assert checks
  // that the constants for the maximum number of digits for an array index
  // cached in the hash field and the number of bits reserved for it does not
  // conflict.
  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
         (1 << String::kArrayIndexValueBits));
  // We want the smi-tagged index in key.  kArrayIndexValueMask has zeros in
  // the low kHashShift bits.
  STATIC_ASSERT(kSmiTag == 0);
  Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
  mov(index, Operand(hash, LSL, kSmiTagSize));
}


void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
                                                       Register outHighReg,
                                                       Register outLowReg) {
  // ARMv7 VFP3 instructions to implement integer to double conversion.
  mov(r7, Operand(inReg, ASR, kSmiTagSize));
  vmov(s15, r7);
  vcvt_f64_s32(d7, s15);
  vmov(outLowReg, outHighReg, d7);
}


void MacroAssembler::ObjectToDoubleVFPRegister(Register object,
                                               DwVfpRegister result,
                                               Register scratch1,
                                               Register scratch2,
                                               Register heap_number_map,
                                               SwVfpRegister scratch3,
                                               Label* not_number,
                                               ObjectToDoubleFlags flags) {
  Label done;
  if ((flags & OBJECT_NOT_SMI) == 0) {
    Label not_smi;
    BranchOnNotSmi(object, &not_smi);
    // Remove smi tag and convert to double.
    mov(scratch1, Operand(object, ASR, kSmiTagSize));
    vmov(scratch3, scratch1);
    vcvt_f64_s32(result, scratch3);
    b(&done);
    bind(&not_smi);
  }
  // Check for heap number and load double value from it.
  ldr(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
  sub(scratch2, object, Operand(kHeapObjectTag));
  cmp(scratch1, heap_number_map);
  b(ne, not_number);
  if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
    // If exponent is all ones the number is either a NaN or +/-Infinity.
    ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
    Sbfx(scratch1,
         scratch1,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);
    // All-one value sign extend to -1.
    cmp(scratch1, Operand(-1));
    b(eq, not_number);
  }
  vldr(result, scratch2, HeapNumber::kValueOffset);
  bind(&done);
}


void MacroAssembler::SmiToDoubleVFPRegister(Register smi,
                                            DwVfpRegister value,
                                            Register scratch1,
                                            SwVfpRegister scratch2) {
  mov(scratch1, Operand(smi, ASR, kSmiTagSize));
  vmov(scratch2, scratch1);
  vcvt_f64_s32(value, scratch2);
}


// Tries to get a signed int32 out of a double precision floating point heap
// number. Rounds towards 0. Branches to 'not_int32' if the double is out of
// the 32-bit signed integer range.
void MacroAssembler::ConvertToInt32(Register source,
                                    Register dest,
                                    Register scratch,
                                    Register scratch2,
                                    Label *not_int32) {
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    sub(scratch, source, Operand(kHeapObjectTag));
    vldr(d0, scratch, HeapNumber::kValueOffset);
    vcvt_s32_f64(s0, d0);
    vmov(dest, s0);
    // Signed vcvt instruction will saturate to the minimum (0x80000000) or
    // maximum (0x7fffffff) signed 32-bit integer when the double is out of
    // range. When subtracting one, the minimum signed integer becomes the
    // maximum signed integer.
    sub(scratch, dest, Operand(1));
    cmp(scratch, Operand(LONG_MAX - 1));
    // If equal then dest was LONG_MAX, if greater dest was LONG_MIN.
    b(ge, not_int32);
  } else {
    // This code is faster for doubles that are in the ranges -0x7fffffff to
    // -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds almost to
    // the range of signed int32 values that are not Smis.  Jumps to the label
    // 'not_int32' if the double isn't in the range -0x80000000.0 to
    // 0x80000000.0 (excluding the endpoints).
    Label right_exponent, done;
    // Get exponent word.
    ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
    // Get exponent alone in scratch2.
    Ubfx(scratch2,
         scratch,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);
    // Load dest with zero.  We use this either for the final shift or
    // for the answer.
    mov(dest, Operand(0, RelocInfo::NONE));
    // Check whether the exponent matches a 32 bit signed int that is not a Smi.
    // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
    // the exponent that we are fastest at and also the highest exponent we can
    // handle here.
    const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
    // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
    // split it up to avoid a constant pool entry.  You can't do that in general
    // for cmp because of the overflow flag, but we know the exponent is in the
    // range 0-2047 so there is no overflow.
    int fudge_factor = 0x400;
    sub(scratch2, scratch2, Operand(fudge_factor));
    cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
    // If we have a match of the int32-but-not-Smi exponent then skip some
    // logic.
    b(eq, &right_exponent);
    // If the exponent is higher than that then go to slow case.  This catches
    // numbers that don't fit in a signed int32, infinities and NaNs.
    b(gt, not_int32);

    // We know the exponent is smaller than 30 (biased).  If it is less than
    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
    // it rounds to zero.
    const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
    sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
    // Dest already has a Smi zero.
    b(lt, &done);

    // We have an exponent between 0 and 30 in scratch2.  Subtract from 30 to
    // get how much to shift down.
    rsb(dest, scratch2, Operand(30));

    bind(&right_exponent);
    // Get the top bits of the mantissa.
    and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
    // Put back the implicit 1.
    orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
    // Shift up the mantissa bits to take up the space the exponent used to
    // take. We just orred in the implicit bit so that took care of one and
    // we want to leave the sign bit 0 so we subtract 2 bits from the shift
    // distance.
    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
    mov(scratch2, Operand(scratch2, LSL, shift_distance));
    // Put sign in zero flag.
    tst(scratch, Operand(HeapNumber::kSignMask));
    // Get the second half of the double. For some exponents we don't
    // actually need this because the bits get shifted out again, but
    // it's probably slower to test than just to do it.
    ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
    // Shift down 22 bits to get the last 10 bits.
    orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
    // Move down according to the exponent.
    mov(dest, Operand(scratch, LSR, dest));
    // Fix sign if sign bit was set.
    rsb(dest, dest, Operand(0, RelocInfo::NONE), LeaveCC, ne);
    bind(&done);
  }
}
1447
1448
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                         Register src,
                                         int num_least_bits) {
  if (CpuFeatures::IsSupported(ARMv7)) {
    ubfx(dst, src, kSmiTagSize, num_least_bits);
  } else {
    mov(dst, Operand(src, ASR, kSmiTagSize));
    and_(dst, dst, Operand((1 << num_least_bits) - 1));
  }
}


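// The runtime-call helpers below share one convention when entering the
// CEntryStub: the arguments themselves are already on the stack, r0 carries
// the argument count, r1 holds the entry point (an ExternalReference to the
// C++ runtime function), and r0 holds the result on return.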
void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
  // All parameters are on the stack. r0 has the return value after call.

  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments matches the
  // expectation.
  if (f->nargs >= 0 && f->nargs != num_arguments) {
    IllegalOperation(num_arguments);
    return;
  }

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  mov(r0, Operand(num_arguments));
  mov(r1, Operand(ExternalReference(f)));
  CEntryStub stub(1);
  CallStub(&stub);
}


void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
  CallRuntime(Runtime::FunctionForId(fid), num_arguments);
}


void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments) {
  mov(r0, Operand(num_arguments));
  mov(r1, Operand(ext));

  CEntryStub stub(1);
  CallStub(&stub);
}


void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                               int num_arguments,
                                               int result_size) {
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  mov(r0, Operand(num_arguments));
  JumpToExternalReference(ext);
}


void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                     int num_arguments,
                                     int result_size) {
  TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
}


void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
#if defined(__thumb__)
  // Thumb mode builtin.
  ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
#endif
  mov(r1, Operand(builtin));
  CEntryStub stub(1);
  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}


void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeJSFlags flags) {
  GetBuiltinEntry(r2, id);
  if (flags == CALL_JS) {
    Call(r2);
  } else {
    ASSERT(flags == JUMP_JS);
    Jump(r2);
  }
}


void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
  // Load the builtins object into target register.
  ldr(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
  // Load the JavaScript builtin function from the builtins object.
  ldr(target, FieldMemOperand(target,
                              JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}


void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  ASSERT(!target.is(r1));
  GetBuiltinFunction(r1, id);
  // Load the code entry point from the builtin function in r1.
  ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
}


void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                Register scratch1, Register scratch2) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(scratch1, Operand(value));
    mov(scratch2, Operand(ExternalReference(counter)));
    str(scratch1, MemOperand(scratch2));
  }
}


void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(scratch2, Operand(ExternalReference(counter)));
    ldr(scratch1, MemOperand(scratch2));
    add(scratch1, scratch1, Operand(value));
    str(scratch1, MemOperand(scratch2));
  }
}


void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(scratch2, Operand(ExternalReference(counter)));
    ldr(scratch1, MemOperand(scratch2));
    sub(scratch1, scratch1, Operand(value));
    str(scratch1, MemOperand(scratch2));
  }
}


void MacroAssembler::Assert(Condition cc, const char* msg) {
  if (FLAG_debug_code)
    Check(cc, msg);
}


void MacroAssembler::AssertRegisterIsRoot(Register reg,
                                          Heap::RootListIndex index) {
  if (FLAG_debug_code) {
    LoadRoot(ip, index);
    cmp(reg, ip);
    Check(eq, "Register did not match expected root");
  }
}


void MacroAssembler::AssertFastElements(Register elements) {
  if (FLAG_debug_code) {
    ASSERT(!elements.is(ip));
    Label ok;
    push(elements);
    ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
    LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
    cmp(elements, ip);
    b(eq, &ok);
    LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
    cmp(elements, ip);
    b(eq, &ok);
    Abort("JSObject with fast elements map has slow elements");
    bind(&ok);
    pop(elements);
  }
}


void MacroAssembler::Check(Condition cc, const char* msg) {
  Label L;
  b(cc, &L);
  Abort(msg);
  // will not return here
  bind(&L);
}


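// Note on the pointer trick used by Abort below: a raw char* pushed as an
// argument to the runtime call could be mistaken for a heap object by the
// GC. Clearing the tag bit turns it into a valid-looking smi, and the
// rounding difference travels separately as a real smi. Illustrative values
// (hypothetical address, not from the source):
//   p1 = 0x2000ae03  (address of msg, odd, so not smi-shaped)
//   p0 = (p1 & ~kSmiTagMask) + kSmiTag = 0x2000ae02
//   pushed: p0 and Smi::FromInt(p1 - p0) == Smi::FromInt(1),
//   from which the runtime reconstructs p1 = p0 + 1.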
void MacroAssembler::Abort(const char* msg) {
  Label abort_start;
  bind(&abort_start);
  // We want to pass the msg string like a smi to avoid GC
  // problems; however, msg is not guaranteed to be aligned
  // properly. Instead, we pass an aligned pointer that is
  // a proper v8 smi, but also pass the alignment difference
  // from the real pointer as a smi.
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }
#endif
  // Disable stub call restrictions to always allow calls to abort.
  set_allow_stub_calls(true);

  mov(r0, Operand(p0));
  push(r0);
  mov(r0, Operand(Smi::FromInt(p1 - p0)));
  push(r0);
  CallRuntime(Runtime::kAbort, 2);
  // will not return here
  if (is_const_pool_blocked()) {
    // If the calling code cares about the exact number of
    // instructions generated, we insert padding here to keep the size
    // of the Abort macro constant.
    static const int kExpectedAbortInstructions = 10;
    int abort_instructions = InstructionsGeneratedSince(&abort_start);
    ASSERT(abort_instructions <= kExpectedAbortInstructions);
    while (abort_instructions++ < kExpectedAbortInstructions) {
      nop();
    }
  }
}


void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    ldr(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX)));
    // Load the function context (which is the incoming, outer context).
    ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
    for (int i = 1; i < context_chain_length; i++) {
      ldr(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
      ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
    }
    // The context may be an intermediate context, not a function context.
    ldr(dst, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
  } else {  // Slot is in the current function context.
    // The context may be an intermediate context, not a function context.
    ldr(dst, MemOperand(cp, Context::SlotOffset(Context::FCONTEXT_INDEX)));
  }
}


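// JumpIfNotBothSmi and JumpIfEitherSmi below fold two tag tests into a
// single branch using ARM conditional execution: the second tst is
// predicated on the result of the first, so the flags after the pair
// describe both registers. With kSmiTag == 0 a value is a smi exactly when
// its low tag bit is clear; a rough equivalent of the first helper
// (sketch only, not generated code):
//
//   if (((reg1 & kSmiTagMask) != 0) || ((reg2 & kSmiTagMask) != 0)) {
//     goto on_not_both_smi;
//   }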
void MacroAssembler::JumpIfNotBothSmi(Register reg1,
                                      Register reg2,
                                      Label* on_not_both_smi) {
  ASSERT_EQ(0, kSmiTag);
  tst(reg1, Operand(kSmiTagMask));
  tst(reg2, Operand(kSmiTagMask), eq);
  b(ne, on_not_both_smi);
}


void MacroAssembler::JumpIfEitherSmi(Register reg1,
                                     Register reg2,
                                     Label* on_either_smi) {
  ASSERT_EQ(0, kSmiTag);
  tst(reg1, Operand(kSmiTagMask));
  tst(reg2, Operand(kSmiTagMask), ne);
  b(eq, on_either_smi);
}


void MacroAssembler::AbortIfSmi(Register object) {
  ASSERT_EQ(0, kSmiTag);
  tst(object, Operand(kSmiTagMask));
  Assert(ne, "Operand is a smi");
}


void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
    Register first,
    Register second,
    Register scratch1,
    Register scratch2,
    Label* failure) {
  // Test that both first and second are sequential ASCII strings.
  // Assume that they are non-smis.
  ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
  ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
  ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));

  JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
                                               scratch2,
                                               scratch1,
                                               scratch2,
                                               failure);
}

void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
                                                         Register second,
                                                         Register scratch1,
                                                         Register scratch2,
                                                         Label* failure) {
  // Check that neither is a smi.
  ASSERT_EQ(0, kSmiTag);
  and_(scratch1, first, Operand(second));
  tst(scratch1, Operand(kSmiTagMask));
  b(eq, failure);
  JumpIfNonSmisNotBothSequentialAsciiStrings(first,
                                             second,
                                             scratch1,
                                             scratch2,
                                             failure);
}


// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed.
void MacroAssembler::AllocateHeapNumber(Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Register heap_number_map,
                                        Label* gc_required) {
  // Allocate an object in the heap for the heap number and tag it as a heap
  // object.
  AllocateInNewSpace(HeapNumber::kSize,
                     result,
                     scratch1,
                     scratch2,
                     gc_required,
                     TAG_OBJECT);

  // Store heap number map in the allocated object.
  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
}


void MacroAssembler::AllocateHeapNumberWithValue(Register result,
                                                 DwVfpRegister value,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Register heap_number_map,
                                                 Label* gc_required) {
  AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
  sub(scratch1, result, Operand(kHeapObjectTag));
  vstr(value, scratch1, HeapNumber::kValueOffset);
}


// Copies a fixed number of fields of heap objects from src to dst.
void MacroAssembler::CopyFields(Register dst,
                                Register src,
                                RegList temps,
                                int field_count) {
  // At least one bit set in the first 15 registers.
  ASSERT((temps & ((1 << 15) - 1)) != 0);
  ASSERT((temps & dst.bit()) == 0);
  ASSERT((temps & src.bit()) == 0);
  // Primitive implementation using only one temporary register.

  Register tmp = no_reg;
  // Find a temp register in temps list.
  for (int i = 0; i < 15; i++) {
    if ((temps & (1 << i)) != 0) {
      tmp.set_code(i);
      break;
    }
  }
  ASSERT(!tmp.is(no_reg));

  for (int i = 0; i < field_count; i++) {
    ldr(tmp, FieldMemOperand(src, i * kPointerSize));
    str(tmp, FieldMemOperand(dst, i * kPointerSize));
  }
}
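
// Illustrative call of CopyFields above (hypothetical call site, not from
// this file): copy three pointer-sized fields from the object in r1 to the
// object in r0, offering r9 as the scratch pool:
//
//   __ CopyFields(r0, r1, r9.bit(), 3);
//
// temps is a RegList bit mask; CopyFields picks the lowest set bit as its
// single temporary, so any register that does not alias dst or src will do.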


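// The portable branch of CountLeadingZeros below is a binary-search clz:
// test the top 16 bits, then 8, 4, 2 and 1, adding to the count and shifting
// the remaining bits up whenever the tested span is all zero. A rough C++
// equivalent of the same idea (sketch only, hypothetical helper name):
//
//   int CountLeadingZerosPortable(uint32_t x) {
//     int n = 0;
//     if ((x & 0xffff0000u) == 0) { n += 16; x <<= 16; }
//     if ((x & 0xff000000u) == 0) { n += 8;  x <<= 8;  }
//     if ((x & 0xf0000000u) == 0) { n += 4;  x <<= 4;  }
//     if ((x & 0xc0000000u) == 0) { n += 2;  x <<= 2;  }
//     if ((x & 0x80000000u) == 0) { n += 1; }
//     return n;  // Yields 31 for x == 0, matching the assembly below.
//   }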
void MacroAssembler::CountLeadingZeros(Register zeros,   // Answer.
                                       Register source,  // Input.
                                       Register scratch) {
  ASSERT(!zeros.is(source));
  ASSERT(!zeros.is(scratch));
  ASSERT(!scratch.is(ip));
  ASSERT(!source.is(ip));
  ASSERT(!zeros.is(ip));
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
  clz(zeros, source);  // This instruction is only supported on ARMv5 and later.
#else
  mov(zeros, Operand(0, RelocInfo::NONE));
  Move(scratch, source);
  // Top 16.
  tst(scratch, Operand(0xffff0000));
  add(zeros, zeros, Operand(16), LeaveCC, eq);
  mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
  // Top 8.
  tst(scratch, Operand(0xff000000));
  add(zeros, zeros, Operand(8), LeaveCC, eq);
  mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
  // Top 4.
  tst(scratch, Operand(0xf0000000));
  add(zeros, zeros, Operand(4), LeaveCC, eq);
  mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
  // Top 2.
  tst(scratch, Operand(0xc0000000));
  add(zeros, zeros, Operand(2), LeaveCC, eq);
  mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
  // Top bit.
  tst(scratch, Operand(0x80000000u));
  add(zeros, zeros, Operand(1), LeaveCC, eq);
#endif
}


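// The instance-type tests below rely on how string types are encoded: masking
// an instance type with kIsNotStringMask | kStringEncodingMask |
// kStringRepresentationMask and comparing against ASCII_STRING_TYPE accepts
// exactly sequential one-byte (ASCII) strings. The eq predicate on the second
// cmp means it only runs when the first operand passed, so the single b(ne)
// rejects the pair if either operand fails.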
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
    Register first,
    Register second,
    Register scratch1,
    Register scratch2,
    Label* failure) {
  int kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
  and_(scratch1, first, Operand(kFlatAsciiStringMask));
  and_(scratch2, second, Operand(kFlatAsciiStringMask));
  cmp(scratch1, Operand(kFlatAsciiStringTag));
  // Ignore second test if first test failed.
  cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
  b(ne, failure);
}


void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
                                                            Register scratch,
                                                            Label* failure) {
  int kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
  and_(scratch, type, Operand(kFlatAsciiStringMask));
  cmp(scratch, Operand(kFlatAsciiStringTag));
  b(ne, failure);
}


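// Worked example for the alignment code below (illustrative numbers only):
// with an 8-byte ActivationFrameAlignment and six C arguments, two words
// spill to the stack. PrepareCallCFunction saves the incoming sp in the
// scratch register, drops three words (two spilled arguments plus one slot
// for the saved sp), rounds sp down to a multiple of 8, and stores the old
// sp just above the argument slots; CallCFunction restores it from that same
// slot after the call. When the required alignment is only one pointer wide,
// sp is simply adjusted down and back up.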
void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
  int frame_alignment = ActivationFrameAlignment();
  // Up to four simple arguments are passed in registers r0..r3.
  int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4;
  if (frame_alignment > kPointerSize) {
    // Make stack end at alignment and make room for num_arguments - 4 words
    // and the original value of sp.
    mov(scratch, sp);
    sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
    ASSERT(IsPowerOf2(frame_alignment));
    and_(sp, sp, Operand(-frame_alignment));
    str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  mov(ip, Operand(function));
  CallCFunction(ip, num_arguments);
}


void MacroAssembler::CallCFunction(Register function, int num_arguments) {
  // Make sure that the stack is aligned before calling a C function unless
  // running in the simulator. The simulator has its own alignment check which
  // provides more information.
#if defined(V8_HOST_ARCH_ARM)
  if (FLAG_debug_code) {
    int frame_alignment = OS::ActivationFrameAlignment();
    int frame_alignment_mask = frame_alignment - 1;
    if (frame_alignment > kPointerSize) {
      ASSERT(IsPowerOf2(frame_alignment));
      Label alignment_as_expected;
      tst(sp, Operand(frame_alignment_mask));
      b(eq, &alignment_as_expected);
      // Don't use Check here, as it will call Runtime_Abort possibly
      // re-entering here.
      stop("Unexpected alignment");
      bind(&alignment_as_expected);
    }
  }
#endif

  // Just call directly. The function called cannot cause a GC, or
  // allow preemption, so the return address in the link register
  // stays correct.
  Call(function);
  int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4;
  if (OS::ActivationFrameAlignment() > kPointerSize) {
    ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    add(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}


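// Illustrative use of the CodePatcher defined below (hypothetical snippet,
// not taken from this file): replace two instructions at a known address
// with nops; the destructor flushes the instruction cache and asserts that
// exactly the requested number of instructions was emitted.
//
//   {
//     CodePatcher patcher(patch_address, 2);
//     patcher.masm()->nop();
//     patcher.masm()->nop();
//   }  // ~CodePatcher runs here.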
#ifdef ENABLE_DEBUGGER_SUPPORT
CodePatcher::CodePatcher(byte* address, int instructions)
    : address_(address),
      instructions_(instructions),
      size_(instructions * Assembler::kInstrSize),
      masm_(address, size_ + Assembler::kGap) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
  // bytes of instructions without failing with buffer size constraints.
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  CPU::FlushICache(address_, size_);

  // Check that the code was patched as expected.
  ASSERT(masm_.pc_ == address_ + size_);
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


void CodePatcher::Emit(Instr x) {
  masm()->emit(x);
}


void CodePatcher::Emit(Address addr) {
  masm()->emit(reinterpret_cast<Instr>(addr));
}
#endif  // ENABLE_DEBUGGER_SUPPORT


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM