// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <limits.h>  // For LONG_MIN, LONG_MAX.

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "bootstrapper.h"
#include "codegen.h"
#include "debug.h"
#include "runtime.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      allow_stub_calls_(true) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}

// We always generate ARM code, never Thumb code, even if V8 is compiled to
// Thumb, so we require inter-working support.
#if defined(__thumb__) && !defined(USE_THUMB_INTERWORK)
#error "flag -mthumb-interwork missing"
#endif


// We do not support Thumb inter-working with an ARM architecture not
// supporting the blx instruction (below v5t). If you know what CPU you are
// compiling for you can use -march=armv7 or similar.
#if defined(USE_THUMB_INTERWORK) && !defined(CAN_USE_THUMB_INSTRUCTIONS)
# error "For thumb inter-working we require an architecture which supports blx"
#endif


// Using bx does not yield better code, so use it only when required.
#if defined(USE_THUMB_INTERWORK)
#define USE_BX 1
#endif


void MacroAssembler::Jump(Register target, Condition cond) {
#if USE_BX
  bx(target, cond);
#else
  mov(pc, Operand(target), LeaveCC, cond);
#endif
}


void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond) {
#if USE_BX
  mov(ip, Operand(target, rmode), LeaveCC, cond);
  bx(ip, cond);
#else
  mov(pc, Operand(target, rmode), LeaveCC, cond);
#endif
}


void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode,
                          Condition cond) {
  ASSERT(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
}


void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  // 'code' is always generated ARM code, never THUMB code
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}


int MacroAssembler::CallSize(Register target, Condition cond) {
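  // With blx the call is a single instruction; without it, Call emits the
  // two-instruction mov lr / mov pc sequence below.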
#if USE_BLX
  return kInstrSize;
#else
  return 2 * kInstrSize;
#endif
}


void MacroAssembler::Call(Register target, Condition cond) {
  // Block constant pool for the call instruction sequence.
  BlockConstPoolScope block_const_pool(this);
#ifdef DEBUG
  int pre_position = pc_offset();
#endif

#if USE_BLX
  blx(target, cond);
#else
  // Set lr for return at current pc + 8.
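  // (Reading pc in ARM state yields the current instruction's address plus 8,
  // so lr will point just past the pc-loading mov below, i.e. at the
  // instruction that follows the call.)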
  mov(lr, Operand(pc), LeaveCC, cond);
  mov(pc, Operand(target), LeaveCC, cond);
#endif

#ifdef DEBUG
  int post_position = pc_offset();
  CHECK_EQ(pre_position + CallSize(target, cond), post_position);
#endif
}


int MacroAssembler::CallSize(
    intptr_t target, RelocInfo::Mode rmode, Condition cond) {
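  // The call sequence is two instructions (mov + blx, or mov lr + mov pc),
  // plus one more if the target constant cannot be encoded in a single mov
  // and must be synthesized or loaded from the constant pool.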
  int size = 2 * kInstrSize;
  Instr mov_instr = cond | MOV | LeaveCC;
  if (!Operand(target, rmode).is_single_instruction(mov_instr)) {
    size += kInstrSize;
  }
  return size;
}


void MacroAssembler::Call(
    intptr_t target, RelocInfo::Mode rmode, Condition cond) {
  // Block constant pool for the call instruction sequence.
  BlockConstPoolScope block_const_pool(this);
#ifdef DEBUG
  int pre_position = pc_offset();
#endif

#if USE_BLX
  // On ARMv5 and after, the recommended call sequence is:
  //  ldr ip, [pc, #...]
  //  blx ip

  // Statement positions are expected to be recorded when the target
  // address is loaded. The mov method will automatically record
  // positions when pc is the target; since that is not the case here,
  // we have to do it explicitly.
  positions_recorder()->WriteRecordedPositions();

  mov(ip, Operand(target, rmode), LeaveCC, cond);
  blx(ip, cond);

  ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
#else
  // Set lr for return at current pc + 8.
  mov(lr, Operand(pc), LeaveCC, cond);
  // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
  mov(pc, Operand(target, rmode), LeaveCC, cond);
  ASSERT(kCallTargetAddressOffset == kInstrSize);
#endif

#ifdef DEBUG
  int post_position = pc_offset();
  CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
#endif
}


int MacroAssembler::CallSize(
    byte* target, RelocInfo::Mode rmode, Condition cond) {
  return CallSize(reinterpret_cast<intptr_t>(target), rmode, cond);
}


void MacroAssembler::Call(
    byte* target, RelocInfo::Mode rmode, Condition cond) {
#ifdef DEBUG
  int pre_position = pc_offset();
#endif

  ASSERT(!RelocInfo::IsCodeTarget(rmode));
  Call(reinterpret_cast<intptr_t>(target), rmode, cond);

#ifdef DEBUG
  int post_position = pc_offset();
  CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
#endif
}


int MacroAssembler::CallSize(
    Handle<Code> code, RelocInfo::Mode rmode, Condition cond) {
  return CallSize(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}


void MacroAssembler::Call(
    Handle<Code> code, RelocInfo::Mode rmode, Condition cond) {
#ifdef DEBUG
  int pre_position = pc_offset();
#endif

  ASSERT(RelocInfo::IsCodeTarget(rmode));
  // 'code' is always generated ARM code, never THUMB code
  Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);

#ifdef DEBUG
  int post_position = pc_offset();
  CHECK_EQ(pre_position + CallSize(code, rmode, cond), post_position);
#endif
}


void MacroAssembler::Ret(Condition cond) {
#if USE_BX
  bx(lr, cond);
#else
  mov(pc, Operand(lr), LeaveCC, cond);
#endif
}


void MacroAssembler::Drop(int count, Condition cond) {
  if (count > 0) {
    add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
  }
}


void MacroAssembler::Ret(int drop, Condition cond) {
  Drop(drop, cond);
  Ret(cond);
}


void MacroAssembler::Swap(Register reg1,
                          Register reg2,
                          Register scratch,
                          Condition cond) {
  if (scratch.is(no_reg)) {
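    // No scratch register available: swap in place using the three-EOR
    // trick (a ^= b; b ^= a; a ^= b).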
    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
    eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
  } else {
    mov(scratch, reg1, LeaveCC, cond);
    mov(reg1, reg2, LeaveCC, cond);
    mov(reg2, scratch, LeaveCC, cond);
  }
}


void MacroAssembler::Call(Label* target) {
  bl(target);
}


void MacroAssembler::Move(Register dst, Handle<Object> value) {
  mov(dst, Operand(value));
}


void MacroAssembler::Move(Register dst, Register src) {
  if (!dst.is(src)) {
    mov(dst, src);
  }
}


void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
                         Condition cond) {
  if (!src2.is_reg() &&
      !src2.must_use_constant_pool() &&
      src2.immediate() == 0) {
    mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond);

  } else if (!src2.is_single_instruction() &&
             !src2.must_use_constant_pool() &&
             CpuFeatures::IsSupported(ARMv7) &&
             IsPowerOf2(src2.immediate() + 1)) {
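    // The immediate is 2^n - 1, i.e. the n lowest bits set, so the AND is
    // equivalent to a zero-extending extract of the low n bits.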
    ubfx(dst, src1, 0, WhichPowerOf2(src2.immediate() + 1), cond);

  } else {
    and_(dst, src1, src2, LeaveCC, cond);
  }
}


void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
                          Condition cond) {
  ASSERT(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7)) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    and_(dst, src1, Operand(mask), LeaveCC, cond);
    if (lsb != 0) {
      mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
    }
  } else {
    ubfx(dst, src1, lsb, width, cond);
  }
}


void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
                          Condition cond) {
  ASSERT(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7)) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    and_(dst, src1, Operand(mask), LeaveCC, cond);
    int shift_up = 32 - lsb - width;
    int shift_down = lsb + shift_up;
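    // Shifting the field up to bit 31 and arithmetically back down extracts
    // and sign-extends it. E.g. for lsb = 4, width = 8: LSL by 20 puts the
    // field's sign bit (bit 11) at bit 31, and ASR by 24 moves the field to
    // bits 0-7 with the sign replicated above it.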
    if (shift_up != 0) {
      mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
    }
    if (shift_down != 0) {
      mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
    }
  } else {
    sbfx(dst, src1, lsb, width, cond);
  }
}


void MacroAssembler::Bfi(Register dst,
                         Register src,
                         Register scratch,
                         int lsb,
                         int width,
                         Condition cond) {
  ASSERT(0 <= lsb && lsb < 32);
  ASSERT(0 <= width && width < 32);
  ASSERT(lsb + width < 32);
  ASSERT(!scratch.is(dst));
  if (width == 0) return;
  if (!CpuFeatures::IsSupported(ARMv7)) {
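    // Without bfi: clear the destination field, mask the source to 'width'
    // bits, shift it into place, and OR it in.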
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, dst, Operand(mask));
    and_(scratch, src, Operand((1 << width) - 1));
    mov(scratch, Operand(scratch, LSL, lsb));
    orr(dst, dst, scratch);
  } else {
    bfi(dst, src, lsb, width, cond);
  }
}


void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
  ASSERT(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7)) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, dst, Operand(mask));
  } else {
    bfc(dst, lsb, width, cond);
  }
}


void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
                          Condition cond) {
  if (!CpuFeatures::IsSupported(ARMv7)) {
    ASSERT(!dst.is(pc) && !src.rm().is(pc));
    ASSERT((satpos >= 0) && (satpos <= 31));

    // These asserts are required to ensure compatibility with the ARMv7
    // implementation.
    ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL));
    ASSERT(src.rs().is(no_reg));

    Label done;
    int satval = (1 << satpos) - 1;

    if (cond != al) {
      b(NegateCondition(cond), &done);  // Skip saturate if !condition.
    }
    if (!(src.is_reg() && dst.is(src.rm()))) {
      mov(dst, src);
    }
    tst(dst, Operand(~satval));
    b(eq, &done);
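    // The tst above set N from dst & ~satval, so mi means the input was
    // negative (clamp to 0) and pl means it overflowed upwards (clamp to
    // satval).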
    mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, mi);  // 0 if negative.
    mov(dst, Operand(satval), LeaveCC, pl);  // satval if positive.
    bind(&done);
  } else {
    usat(dst, satpos, src, cond);
  }
}


void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
  // Empty the const pool.
  CheckConstPool(true, true);
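  // index is a smi, so shifting it left by kInstrSizeLog2 - kSmiTagSize
  // scales the untagged value to a byte offset into the branch table. The
  // add below reads pc as this instruction's address + 8, which is the first
  // table entry (just past the alignment nop).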
  add(pc, pc, Operand(index,
                      LSL,
                      Instruction::kInstrSizeLog2 - kSmiTagSize));
  BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize);
  nop();  // Jump table alignment.
  for (int i = 0; i < targets.length(); i++) {
    b(targets[i]);
  }
}


void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Condition cond) {
  ldr(destination, MemOperand(roots, index << kPointerSizeLog2), cond);
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index,
                               Condition cond) {
  str(source, MemOperand(roots, index << kPointerSizeLog2), cond);
}


void MacroAssembler::RecordWriteHelper(Register object,
                                       Register address,
                                       Register scratch) {
  if (emit_debug_code()) {
    // Check that the object is not in new space.
    Label not_in_new_space;
    InNewSpace(object, scratch, ne, &not_in_new_space);
    Abort("new-space object passed to RecordWriteHelper");
    bind(&not_in_new_space);
  }

  // Calculate page address.
  Bfc(object, 0, kPageSizeBits);
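  // Pages are power-of-two aligned, so clearing the low kPageSizeBits bits
  // of the object pointer yields the start address of its page.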

  // Calculate region number.
  Ubfx(address, address, Page::kRegionSizeLog2,
       kPageSizeBits - Page::kRegionSizeLog2);

  // Mark region dirty.
  ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
  mov(ip, Operand(1));
  orr(scratch, scratch, Operand(ip, LSL, address));
  str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
}


void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cond,
                                Label* branch) {
  ASSERT(cond == eq || cond == ne);
  and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
  cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
  b(cond, branch);
}


// Will clobber 4 registers: object, offset, scratch, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
                                 Operand offset,
                                 Register scratch0,
                                 Register scratch1) {
  // The compiled code assumes that record write doesn't change the
  // context register, so we check that none of the clobbered
  // registers are cp.
  ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));

  Label done;

  // First, test that the object is not in the new space. We cannot set
  // region marks for new space pages.
  InNewSpace(object, scratch0, eq, &done);

  // Add offset into the object.
  add(scratch0, object, offset);

  // Record the actual write.
  RecordWriteHelper(object, scratch0, scratch1);

  bind(&done);

  // Clobber all input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(object, Operand(BitCast<int32_t>(kZapValue)));
    mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
    mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
  }
}


// Will clobber 4 registers: object, address, scratch, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
                                 Register address,
                                 Register scratch) {
  // The compiled code assumes that record write doesn't change the
  // context register, so we check that none of the clobbered
  // registers are cp.
  ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));

  Label done;

  // First, test that the object is not in the new space. We cannot set
  // region marks for new space pages.
  InNewSpace(object, scratch, eq, &done);

  // Record the actual write.
  RecordWriteHelper(object, address, scratch);

  bind(&done);

  // Clobber all input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(object, Operand(BitCast<int32_t>(kZapValue)));
    mov(address, Operand(BitCast<int32_t>(kZapValue)));
    mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
  }
}


// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of contiguous register values starting with r0:
  ASSERT(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
  // Safepoints expect a block of kNumSafepointRegisters values on the
  // stack, so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  ASSERT(num_unsaved >= 0);
  sub(sp, sp, Operand(num_unsaved * kPointerSize));
  stm(db_w, sp, kSafepointSavedRegisters);
}


void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  ldm(ia_w, sp, kSafepointSavedRegisters);
  add(sp, sp, Operand(num_unsaved * kPointerSize));
}


void MacroAssembler::PushSafepointRegistersAndDoubles() {
  PushSafepointRegisters();
  sub(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
                      kDoubleSize));
  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
    vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
  }
}


void MacroAssembler::PopSafepointRegistersAndDoubles() {
  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
    vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
  }
  add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
                      kDoubleSize));
  PopSafepointRegisters();
}


void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
                                                             Register dst) {
  str(src, SafepointRegistersAndDoublesSlot(dst));
}


void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  str(src, SafepointRegisterSlot(dst));
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  ldr(dst, SafepointRegisterSlot(src));
}


int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.
  ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
  return reg_code;
}


MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}


MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
  // General purpose registers are pushed last on the stack.
  int doubles_size = DwVfpRegister::kNumAllocatableRegisters * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
  return MemOperand(sp, doubles_size + register_offset);
}


void MacroAssembler::Ldrd(Register dst1, Register dst2,
                          const MemOperand& src, Condition cond) {
  ASSERT(src.rm().is(no_reg));
  ASSERT(!dst1.is(lr));  // r14.
  ASSERT_EQ(0, dst1.code() % 2);
  ASSERT_EQ(dst1.code() + 1, dst2.code());

  // Generate two ldr instructions if ldrd is not available.
  if (CpuFeatures::IsSupported(ARMv7)) {
    CpuFeatures::Scope scope(ARMv7);
    ldrd(dst1, dst2, src, cond);
  } else {
    MemOperand src2(src);
    src2.set_offset(src2.offset() + 4);
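    // If the base register doubles as the first destination, load the second
    // word first so the base address is not clobbered before its second use.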
    if (dst1.is(src.rn())) {
      ldr(dst2, src2, cond);
      ldr(dst1, src, cond);
    } else {
      ldr(dst1, src, cond);
      ldr(dst2, src2, cond);
    }
  }
}


void MacroAssembler::Strd(Register src1, Register src2,
                          const MemOperand& dst, Condition cond) {
  ASSERT(dst.rm().is(no_reg));
  ASSERT(!src1.is(lr));  // r14.
  ASSERT_EQ(0, src1.code() % 2);
  ASSERT_EQ(src1.code() + 1, src2.code());

  // Generate two str instructions if strd is not available.
  if (CpuFeatures::IsSupported(ARMv7)) {
    CpuFeatures::Scope scope(ARMv7);
    strd(src1, src2, dst, cond);
  } else {
    MemOperand dst2(dst);
    dst2.set_offset(dst2.offset() + 4);
    str(src1, dst, cond);
    str(src2, dst2, cond);
  }
}


void MacroAssembler::ClearFPSCRBits(const uint32_t bits_to_clear,
                                    const Register scratch,
                                    const Condition cond) {
  vmrs(scratch, cond);
  bic(scratch, scratch, Operand(bits_to_clear), LeaveCC, cond);
  vmsr(scratch, cond);
}


void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
                                           const DwVfpRegister src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}


void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
                                           const double src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}


void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
                                            const DwVfpRegister src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}


void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
                                            const double src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}


void MacroAssembler::EnterFrame(StackFrame::Type type) {
  // r0-r3: preserved
  stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
  mov(ip, Operand(Smi::FromInt(type)));
  push(ip);
  mov(ip, Operand(CodeObject()));
  push(ip);
  add(fp, sp, Operand(3 * kPointerSize));  // Adjust FP to point to saved FP.
}


void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  // r0: preserved
  // r1: preserved
  // r2: preserved

  // Drop the execution stack down to the frame pointer and restore
  // the caller frame pointer and return address.
  mov(sp, fp);
  ldm(ia_w, sp, fp.bit() | lr.bit());
}


void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
  // Set up the frame structure on the stack.
  ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
  ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
  ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
  Push(lr, fp);
  mov(fp, Operand(sp));  // Set up new frame pointer.
  // Reserve room for saved entry sp and code object.
  sub(sp, sp, Operand(2 * kPointerSize));
  if (emit_debug_code()) {
    mov(ip, Operand(0));
    str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
  }
  mov(ip, Operand(CodeObject()));
  str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));

  // Save the frame pointer and the context in top.
  mov(ip, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
  str(fp, MemOperand(ip));
  mov(ip, Operand(ExternalReference(Isolate::k_context_address, isolate())));
  str(cp, MemOperand(ip));

  // Optionally save all double registers.
  if (save_doubles) {
    DwVfpRegister first = d0;
    DwVfpRegister last =
        DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
    vstm(db_w, sp, first, last);
    // Note that d0 will be accessible at
    //   fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize,
    // since the sp slot and code slot were pushed after the fp.
  }

  // Reserve place for the return address and stack space and align the frame
  // preparing for calling the runtime function.
  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
  if (frame_alignment > 0) {
    ASSERT(IsPowerOf2(frame_alignment));
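    // frame_alignment is a power of two, so ANDing sp with -frame_alignment
    // rounds it down to the nearest aligned address.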
    and_(sp, sp, Operand(-frame_alignment));
  }

  // Set the exit frame sp value to point just before the return address
  // location.
  add(ip, sp, Operand(kPointerSize));
  str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
}


void MacroAssembler::InitializeNewString(Register string,
                                         Register length,
                                         Heap::RootListIndex map_index,
                                         Register scratch1,
                                         Register scratch2) {
  mov(scratch1, Operand(length, LSL, kSmiTagSize));
  LoadRoot(scratch2, map_index);
  str(scratch1, FieldMemOperand(string, String::kLengthOffset));
  mov(scratch1, Operand(String::kEmptyHashField));
  str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
  str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
}


int MacroAssembler::ActivationFrameAlignment() {
#if defined(V8_HOST_ARCH_ARM)
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one ARM
  // platform for another ARM platform with a different alignment.
  return OS::ActivationFrameAlignment();
#else  // defined(V8_HOST_ARCH_ARM)
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif  // defined(V8_HOST_ARCH_ARM)
}


void MacroAssembler::LeaveExitFrame(bool save_doubles,
                                    Register argument_count) {
  // Optionally restore all double registers.
  if (save_doubles) {
    // Calculate the stack location of the saved doubles and restore them.
    const int offset = 2 * kPointerSize;
    sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize));
    DwVfpRegister first = d0;
    DwVfpRegister last =
        DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
    vldm(ia, r3, first, last);
  }

  // Clear top frame.
  mov(r3, Operand(0, RelocInfo::NONE));
  mov(ip, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
  str(r3, MemOperand(ip));

  // Restore current context from top and clear it in debug mode.
  mov(ip, Operand(ExternalReference(Isolate::k_context_address, isolate())));
  ldr(cp, MemOperand(ip));
#ifdef DEBUG
  str(r3, MemOperand(ip));
#endif

  // Tear down the exit frame, pop the arguments, and return.
  mov(sp, Operand(fp));
  ldm(ia_w, sp, fp.bit() | lr.bit());
  if (argument_count.is_valid()) {
    add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
  }
}


void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
  vmov(dst, r0, r1);
}

void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    Register code_reg,
                                    Label* done,
                                    InvokeFlag flag,
                                    CallWrapper* call_wrapper) {
  bool definitely_matches = false;
  Label regular_invoke;

  // Check whether the expected and actual arguments count match. If not,
  // set up registers according to the contract with
  // ArgumentsAdaptorTrampoline:
  //  r0: actual arguments count
  //  r1: function (passed through to callee)
  //  r2: expected arguments count
  //  r3: callee code entry

  // The code below is made a lot easier because the calling code already sets
  // up actual and expected registers according to the contract if values are
  // passed in registers.
  ASSERT(actual.is_immediate() || actual.reg().is(r0));
  ASSERT(expected.is_immediate() || expected.reg().is(r2));
  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));

  if (expected.is_immediate()) {
    ASSERT(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      mov(r0, Operand(actual.immediate()));
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaption code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        mov(r2, Operand(expected.immediate()));
      }
    }
  } else {
    if (actual.is_immediate()) {
      cmp(expected.reg(), Operand(actual.immediate()));
      b(eq, &regular_invoke);
      mov(r0, Operand(actual.immediate()));
    } else {
      cmp(expected.reg(), Operand(actual.reg()));
      b(eq, &regular_invoke);
    }
  }

  if (!definitely_matches) {
    if (!code_constant.is_null()) {
      mov(r3, Operand(code_constant));
      add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
    }

    Handle<Code> adaptor =
        isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (flag == CALL_FUNCTION) {
      if (call_wrapper != NULL) {
        call_wrapper->BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
      }
      Call(adaptor, RelocInfo::CODE_TARGET);
      if (call_wrapper != NULL) call_wrapper->AfterCall();
      b(done);
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&regular_invoke);
  }
}


void MacroAssembler::InvokeCode(Register code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                InvokeFlag flag,
                                CallWrapper* call_wrapper) {
  Label done;

  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
                 call_wrapper);
  if (flag == CALL_FUNCTION) {
    if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(code));
    Call(code);
    if (call_wrapper != NULL) call_wrapper->AfterCall();
  } else {
    ASSERT(flag == JUMP_FUNCTION);
    Jump(code);
  }

  // Continue here if InvokePrologue does handle the invocation due to
  // mismatched parameter counts.
  bind(&done);
}


void MacroAssembler::InvokeCode(Handle<Code> code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                RelocInfo::Mode rmode,
                                InvokeFlag flag) {
  Label done;

  InvokePrologue(expected, actual, code, no_reg, &done, flag);
  if (flag == CALL_FUNCTION) {
    Call(code, rmode);
  } else {
    Jump(code, rmode);
  }

  // Continue here if InvokePrologue does handle the invocation due to
  // mismatched parameter counts.
  bind(&done);
}


void MacroAssembler::InvokeFunction(Register fun,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    CallWrapper* call_wrapper) {
  // Contract with called JS functions requires that function is passed in r1.
  ASSERT(fun.is(r1));

  Register expected_reg = r2;
  Register code_reg = r3;

  ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
  ldr(expected_reg,
      FieldMemOperand(code_reg,
                      SharedFunctionInfo::kFormalParameterCountOffset));
  mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
  ldr(code_reg,
      FieldMemOperand(r1, JSFunction::kCodeEntryOffset));

  ParameterCount expected(expected_reg);
  InvokeCode(code_reg, expected, actual, flag, call_wrapper);
}


void MacroAssembler::InvokeFunction(JSFunction* function,
                                    const ParameterCount& actual,
                                    InvokeFlag flag) {
  ASSERT(function->is_compiled());

  // Get the function and set up the context.
  mov(r1, Operand(Handle<JSFunction>(function)));
  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

  // Invoke the cached code.
  Handle<Code> code(function->code());
  ParameterCount expected(function->shared()->formal_parameter_count());
  if (V8::UseCrankshaft()) {
    // TODO(kasperl): For now, we always call indirectly through the
    // code field in the function to allow recompilation to take effect
    // without changing any of the call sites.
    ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
    InvokeCode(r3, expected, actual, flag);
  } else {
    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
  }
}


void MacroAssembler::IsObjectJSObjectType(Register heap_object,
                                          Register map,
                                          Register scratch,
                                          Label* fail) {
  ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
  IsInstanceJSObjectType(map, scratch, fail);
}


void MacroAssembler::IsInstanceJSObjectType(Register map,
                                            Register scratch,
                                            Label* fail) {
  ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  cmp(scratch, Operand(FIRST_JS_OBJECT_TYPE));
  b(lt, fail);
  cmp(scratch, Operand(LAST_JS_OBJECT_TYPE));
  b(gt, fail);
}


void MacroAssembler::IsObjectJSStringType(Register object,
                                          Register scratch,
                                          Label* fail) {
  ASSERT(kNotStringTag != 0);

  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  tst(scratch, Operand(kIsNotStringMask));
  b(ne, fail);
}


#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
  ASSERT(allow_stub_calls());
  mov(r0, Operand(0, RelocInfo::NONE));
  mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
  CEntryStub ces(1);
  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
#endif


void MacroAssembler::PushTryHandler(CodeLocation try_location,
                                    HandlerType type) {
  // Adjust this code if not the case.
  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
  // The pc (return address) is passed in register lr.
  if (try_location == IN_JAVASCRIPT) {
    if (type == TRY_CATCH_HANDLER) {
      mov(r3, Operand(StackHandler::TRY_CATCH));
    } else {
      mov(r3, Operand(StackHandler::TRY_FINALLY));
    }
    ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
           && StackHandlerConstants::kFPOffset == 2 * kPointerSize
           && StackHandlerConstants::kPCOffset == 3 * kPointerSize);
    stm(db_w, sp, r3.bit() | fp.bit() | lr.bit());
    // Save the current handler as the next handler.
    mov(r3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
    ldr(r1, MemOperand(r3));
    ASSERT(StackHandlerConstants::kNextOffset == 0);
    push(r1);
    // Link this handler as the new current one.
    str(sp, MemOperand(r3));
  } else {
    // Must preserve r0-r4, r5-r7 are available.
    ASSERT(try_location == IN_JS_ENTRY);
    // The frame pointer does not point to a JS frame so we save NULL
    // for fp. We expect the code throwing an exception to check fp
    // before dereferencing it to restore the context.
    mov(ip, Operand(0, RelocInfo::NONE));  // To save a NULL frame pointer.
    mov(r6, Operand(StackHandler::ENTRY));
    ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
           && StackHandlerConstants::kFPOffset == 2 * kPointerSize
           && StackHandlerConstants::kPCOffset == 3 * kPointerSize);
    stm(db_w, sp, r6.bit() | ip.bit() | lr.bit());
    // Save the current handler as the next handler.
    mov(r7, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
    ldr(r6, MemOperand(r7));
    ASSERT(StackHandlerConstants::kNextOffset == 0);
    push(r6);
    // Link this handler as the new current one.
    str(sp, MemOperand(r7));
  }
}


void MacroAssembler::PopTryHandler() {
  ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
  pop(r1);
  mov(ip, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
  add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
  str(r1, MemOperand(ip));
}


void MacroAssembler::Throw(Register value) {
  // r0 is expected to hold the exception.
  if (!value.is(r0)) {
    mov(r0, value);
  }

  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);

  // Drop the sp to the top of the handler.
  mov(r3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
  ldr(sp, MemOperand(r3));

  // Restore the next handler and frame pointer, discard handler state.
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  pop(r2);
  str(r2, MemOperand(r3));
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
  ldm(ia_w, sp, r3.bit() | fp.bit());  // r3: discarded state.

  // Before returning we restore the context from the frame pointer if
  // not NULL. The frame pointer is NULL in the exception handler of a
  // JS entry frame.
  cmp(fp, Operand(0, RelocInfo::NONE));
  // Set cp to NULL if fp is NULL.
  mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
  // Restore cp otherwise.
  ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
#ifdef DEBUG
  if (emit_debug_code()) {
    mov(lr, Operand(pc));
  }
#endif
  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
  pop(pc);
}


void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
                                      Register value) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);

  // r0 is expected to hold the exception.
  if (!value.is(r0)) {
    mov(r0, value);
  }

  // Drop sp to the top stack handler.
  mov(r3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
  ldr(sp, MemOperand(r3));

  // Unwind the handlers until the ENTRY handler is found.
  Label loop, done;
  bind(&loop);
  // Load the type of the current stack handler.
  const int kStateOffset = StackHandlerConstants::kStateOffset;
  ldr(r2, MemOperand(sp, kStateOffset));
  cmp(r2, Operand(StackHandler::ENTRY));
  b(eq, &done);
  // Fetch the next handler in the list.
  const int kNextOffset = StackHandlerConstants::kNextOffset;
  ldr(sp, MemOperand(sp, kNextOffset));
  jmp(&loop);
  bind(&done);

  // Set the top handler address to next handler past the current ENTRY handler.
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  pop(r2);
  str(r2, MemOperand(r3));

  if (type == OUT_OF_MEMORY) {
    // Set external caught exception to false.
    ExternalReference external_caught(
        Isolate::k_external_caught_exception_address, isolate());
    mov(r0, Operand(false, RelocInfo::NONE));
    mov(r2, Operand(external_caught));
    str(r0, MemOperand(r2));

    // Set pending exception and r0 to out of memory exception.
    Failure* out_of_memory = Failure::OutOfMemoryException();
    mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
    mov(r2, Operand(ExternalReference(Isolate::k_pending_exception_address,
                                      isolate())));
    str(r0, MemOperand(r2));
  }

  // Stack layout at this point. See also StackHandlerConstants.
  // sp ->   state (ENTRY)
  //         fp
  //         lr

  // Discard handler state (r2 is not used) and restore frame pointer.
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
  ldm(ia_w, sp, r2.bit() | fp.bit());  // r2: discarded state.
  // Before returning we restore the context from the frame pointer if
  // not NULL. The frame pointer is NULL in the exception handler of a
  // JS entry frame.
  cmp(fp, Operand(0, RelocInfo::NONE));
  // Set cp to NULL if fp is NULL.
  mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
  // Restore cp otherwise.
  ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
#ifdef DEBUG
  if (emit_debug_code()) {
    mov(lr, Operand(pc));
  }
#endif
  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
  pop(pc);
}


void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch,
                                            Label* miss) {
  Label same_contexts;

  ASSERT(!holder_reg.is(scratch));
  ASSERT(!holder_reg.is(ip));
  ASSERT(!scratch.is(ip));

  // Load current lexical context from the stack frame.
  ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
  // In debug mode, make sure the lexical context is set.
#ifdef DEBUG
  cmp(scratch, Operand(0, RelocInfo::NONE));
  Check(ne, "we should not have an empty lexical context");
#endif

  // Load the global context of the current context.
  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
  ldr(scratch, FieldMemOperand(scratch, offset));
  ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));

  // Check the context is a global context.
  if (emit_debug_code()) {
    // TODO(119): avoid push(holder_reg)/pop(holder_reg)
    // Cannot use ip as a temporary in this verification code, because ip is
    // clobbered as part of cmp with an object Operand.
    push(holder_reg);  // Temporarily save holder on the stack.
    // Read the first word and compare to the global_context_map.
    ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
    LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
    cmp(holder_reg, ip);
    Check(eq, "JSGlobalObject::global_context should be a global context.");
    pop(holder_reg);  // Restore holder.
  }

  // Check if both contexts are the same.
  ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
  cmp(scratch, Operand(ip));
  b(eq, &same_contexts);

  // Check the context is a global context.
  if (emit_debug_code()) {
    // TODO(119): avoid push(holder_reg)/pop(holder_reg)
    // Cannot use ip as a temporary in this verification code, because ip is
    // clobbered as part of cmp with an object Operand.
    push(holder_reg);  // Temporarily save holder on the stack.
    mov(holder_reg, ip);  // Move ip to its holding place.
    LoadRoot(ip, Heap::kNullValueRootIndex);
    cmp(holder_reg, ip);
    Check(ne, "JSGlobalProxy::context() should not be null.");

    ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
    LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
    cmp(holder_reg, ip);
    Check(eq, "JSGlobalObject::global_context should be a global context.");
    // Restoring ip is not needed. ip is reloaded below.
    pop(holder_reg);  // Restore holder.
    // Restore ip to holder's context.
    ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
  }

  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.
  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;

  ldr(scratch, FieldMemOperand(scratch, token_offset));
  ldr(ip, FieldMemOperand(ip, token_offset));
  cmp(scratch, Operand(ip));
  b(ne, miss);

  bind(&same_contexts);
}
1298
1299
1300void MacroAssembler::AllocateInNewSpace(int object_size,
1301 Register result,
1302 Register scratch1,
1303 Register scratch2,
1304 Label* gc_required,
1305 AllocationFlags flags) {
John Reck59135872010-11-02 12:39:01 -07001306 if (!FLAG_inline_new) {
Steve Block44f0eee2011-05-26 01:26:41 +01001307 if (emit_debug_code()) {
John Reck59135872010-11-02 12:39:01 -07001308 // Trash the registers to simulate an allocation failure.
1309 mov(result, Operand(0x7091));
1310 mov(scratch1, Operand(0x7191));
1311 mov(scratch2, Operand(0x7291));
1312 }
1313 jmp(gc_required);
1314 return;
1315 }
1316
Steve Blocka7e24c12009-10-30 11:49:00 +00001317 ASSERT(!result.is(scratch1));
Ben Murdochb0fe1622011-05-05 13:52:32 +01001318 ASSERT(!result.is(scratch2));
Steve Blocka7e24c12009-10-30 11:49:00 +00001319 ASSERT(!scratch1.is(scratch2));
Ben Murdoche0cee9b2011-05-25 10:26:03 +01001320 ASSERT(!scratch1.is(ip));
1321 ASSERT(!scratch2.is(ip));
Steve Blocka7e24c12009-10-30 11:49:00 +00001322
Kristian Monsen25f61362010-05-21 11:50:48 +01001323 // Make object size into bytes.
1324 if ((flags & SIZE_IN_WORDS) != 0) {
1325 object_size *= kPointerSize;
1326 }
1327 ASSERT_EQ(0, object_size & kObjectAlignmentMask);
1328
Ben Murdochb0fe1622011-05-05 13:52:32 +01001329 // Check relative positions of allocation top and limit addresses.
1330 // The values must be adjacent in memory to allow the use of LDM.
1331 // Also, assert that the registers are numbered such that the values
1332 // are loaded in the correct order.
Steve Blocka7e24c12009-10-30 11:49:00 +00001333 ExternalReference new_space_allocation_top =
Steve Block44f0eee2011-05-26 01:26:41 +01001334 ExternalReference::new_space_allocation_top_address(isolate());
Ben Murdochb0fe1622011-05-05 13:52:32 +01001335 ExternalReference new_space_allocation_limit =
Steve Block44f0eee2011-05-26 01:26:41 +01001336 ExternalReference::new_space_allocation_limit_address(isolate());
Ben Murdochb0fe1622011-05-05 13:52:32 +01001337 intptr_t top =
1338 reinterpret_cast<intptr_t>(new_space_allocation_top.address());
1339 intptr_t limit =
1340 reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
1341 ASSERT((limit - top) == kPointerSize);
1342 ASSERT(result.code() < ip.code());
1343
1344 // Set up allocation top address and object size registers.
1345 Register topaddr = scratch1;
1346 Register obj_size_reg = scratch2;
1347 mov(topaddr, Operand(new_space_allocation_top));
1348 mov(obj_size_reg, Operand(object_size));
1349
1350 // This code stores a temporary value in ip. This is OK, as the code below
1351 // does not need ip for implicit literal generation.
Steve Blocka7e24c12009-10-30 11:49:00 +00001352 if ((flags & RESULT_CONTAINS_TOP) == 0) {
Ben Murdochb0fe1622011-05-05 13:52:32 +01001353 // Load allocation top into result and allocation limit into ip.
1354 ldm(ia, topaddr, result.bit() | ip.bit());
1355 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01001356 if (emit_debug_code()) {
Ben Murdochb0fe1622011-05-05 13:52:32 +01001357 // Assert that result actually contains top on entry. ip is used
1358 // immediately below, so this use of ip does not cause a difference in
1359 // register content between debug and release mode.
1360 ldr(ip, MemOperand(topaddr));
1361 cmp(result, ip);
1362 Check(eq, "Unexpected allocation top");
1363 }
1364 // Load allocation limit into ip. Result already contains allocation top.
1365 ldr(ip, MemOperand(topaddr, limit - top));
Steve Blocka7e24c12009-10-30 11:49:00 +00001366 }
1367
1368 // Calculate new top and bail out if new space is exhausted. Use result
1369 // to calculate the new top.
Steve Block1e0659c2011-05-24 12:43:12 +01001370 add(scratch2, result, Operand(obj_size_reg), SetCC);
1371 b(cs, gc_required);
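// The SetCC/carry check guards against address wraparound: for example, if
// result held 0xfffffff8 and obj_size_reg held 16, the add would wrap past
// 2^32 and set the carry flag, so allocation fails over to gc_required
// instead of producing a bogus top pointer. (Illustrative values only.)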
Ben Murdochb0fe1622011-05-05 13:52:32 +01001372 cmp(scratch2, Operand(ip));
Steve Blocka7e24c12009-10-30 11:49:00 +00001373 b(hi, gc_required);
Ben Murdochb0fe1622011-05-05 13:52:32 +01001374 str(scratch2, MemOperand(topaddr));
Steve Blocka7e24c12009-10-30 11:49:00 +00001375
Ben Murdochb0fe1622011-05-05 13:52:32 +01001376 // Tag object if requested.
Steve Blocka7e24c12009-10-30 11:49:00 +00001377 if ((flags & TAG_OBJECT) != 0) {
Ben Murdochb0fe1622011-05-05 13:52:32 +01001378 add(result, result, Operand(kHeapObjectTag));
Steve Blocka7e24c12009-10-30 11:49:00 +00001379 }
1380}
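// A hedged usage sketch (mirroring AllocateHeapNumber further below;
// the concrete registers are illustrative only):
//   AllocateInNewSpace(HeapNumber::kSize, r0, r3, r4, &gc_required, TAG_OBJECT);
// On success r0 holds a tagged pointer to HeapNumber::kSize bytes of
// uninitialized new-space memory; on failure control continues at gc_required.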
1381
1382
1383void MacroAssembler::AllocateInNewSpace(Register object_size,
1384 Register result,
1385 Register scratch1,
1386 Register scratch2,
1387 Label* gc_required,
1388 AllocationFlags flags) {
John Reck59135872010-11-02 12:39:01 -07001389 if (!FLAG_inline_new) {
Steve Block44f0eee2011-05-26 01:26:41 +01001390 if (emit_debug_code()) {
John Reck59135872010-11-02 12:39:01 -07001391 // Trash the registers to simulate an allocation failure.
1392 mov(result, Operand(0x7091));
1393 mov(scratch1, Operand(0x7191));
1394 mov(scratch2, Operand(0x7291));
1395 }
1396 jmp(gc_required);
1397 return;
1398 }
1399
Ben Murdochb0fe1622011-05-05 13:52:32 +01001400 // Assert that the register arguments are different and that none of
1401 // them are ip. ip is used explicitly in the code generated below.
Steve Blocka7e24c12009-10-30 11:49:00 +00001402 ASSERT(!result.is(scratch1));
Ben Murdochb0fe1622011-05-05 13:52:32 +01001403 ASSERT(!result.is(scratch2));
Steve Blocka7e24c12009-10-30 11:49:00 +00001404 ASSERT(!scratch1.is(scratch2));
Ben Murdochb0fe1622011-05-05 13:52:32 +01001405 ASSERT(!result.is(ip));
1406 ASSERT(!scratch1.is(ip));
1407 ASSERT(!scratch2.is(ip));
Steve Blocka7e24c12009-10-30 11:49:00 +00001408
Ben Murdochb0fe1622011-05-05 13:52:32 +01001409 // Check relative positions of allocation top and limit addresses.
1410 // The values must be adjacent in memory to allow the use of LDM.
1411 // Also, assert that the registers are numbered such that the values
1412 // are loaded in the correct order.
Steve Blocka7e24c12009-10-30 11:49:00 +00001413 ExternalReference new_space_allocation_top =
Steve Block44f0eee2011-05-26 01:26:41 +01001414 ExternalReference::new_space_allocation_top_address(isolate());
Ben Murdochb0fe1622011-05-05 13:52:32 +01001415 ExternalReference new_space_allocation_limit =
Steve Block44f0eee2011-05-26 01:26:41 +01001416 ExternalReference::new_space_allocation_limit_address(isolate());
Ben Murdochb0fe1622011-05-05 13:52:32 +01001417 intptr_t top =
1418 reinterpret_cast<intptr_t>(new_space_allocation_top.address());
1419 intptr_t limit =
1420 reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
1421 ASSERT((limit - top) == kPointerSize);
1422 ASSERT(result.code() < ip.code());
1423
1424 // Set up allocation top address.
1425 Register topaddr = scratch1;
1426 mov(topaddr, Operand(new_space_allocation_top));
1427
1428 // This code stores a temporary value in ip. This is OK, as the code below
1429 // does not need ip for implicit literal generation.
Steve Blocka7e24c12009-10-30 11:49:00 +00001430 if ((flags & RESULT_CONTAINS_TOP) == 0) {
Ben Murdochb0fe1622011-05-05 13:52:32 +01001431 // Load allocation top into result and allocation limit into ip.
1432 ldm(ia, topaddr, result.bit() | ip.bit());
1433 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01001434 if (emit_debug_code()) {
Ben Murdochb0fe1622011-05-05 13:52:32 +01001435 // Assert that result actually contains top on entry. ip is used
1436 // immediately below, so this use of ip does not cause a difference in
1437 // register content between debug and release mode.
1438 ldr(ip, MemOperand(topaddr));
1439 cmp(result, ip);
1440 Check(eq, "Unexpected allocation top");
1441 }
1442 // Load allocation limit into ip. Result already contains allocation top.
1443 ldr(ip, MemOperand(topaddr, limit - top));
Steve Blocka7e24c12009-10-30 11:49:00 +00001444 }
1445
1446 // Calculate new top and bail out if new space is exhausted. Use result
Ben Murdochb0fe1622011-05-05 13:52:32 +01001447 // to calculate the new top. Object size may be in words so a shift is
1448 // required to get the number of bytes.
Kristian Monsen25f61362010-05-21 11:50:48 +01001449 if ((flags & SIZE_IN_WORDS) != 0) {
Steve Block1e0659c2011-05-24 12:43:12 +01001450 add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
Kristian Monsen25f61362010-05-21 11:50:48 +01001451 } else {
Steve Block1e0659c2011-05-24 12:43:12 +01001452 add(scratch2, result, Operand(object_size), SetCC);
Kristian Monsen25f61362010-05-21 11:50:48 +01001453 }
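// Example of the words-to-bytes shift (illustrative): with SIZE_IN_WORDS set
// and object_size = 5, the LSL by kPointerSizeLog2 (2 on ARM) computes
// 5 << 2 = 20 bytes, while SetCC keeps the carry check below valid.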
Steve Block1e0659c2011-05-24 12:43:12 +01001454 b(cs, gc_required);
Ben Murdochb0fe1622011-05-05 13:52:32 +01001455 cmp(scratch2, Operand(ip));
Steve Blocka7e24c12009-10-30 11:49:00 +00001456 b(hi, gc_required);
1457
Steve Blockd0582a62009-12-15 09:54:21 +00001458 // Update allocation top. result temporarily holds the new top.
Steve Block44f0eee2011-05-26 01:26:41 +01001459 if (emit_debug_code()) {
Ben Murdochb0fe1622011-05-05 13:52:32 +01001460 tst(scratch2, Operand(kObjectAlignmentMask));
Steve Blockd0582a62009-12-15 09:54:21 +00001461 Check(eq, "Unaligned allocation in new space");
1462 }
Ben Murdochb0fe1622011-05-05 13:52:32 +01001463 str(scratch2, MemOperand(topaddr));
Steve Blocka7e24c12009-10-30 11:49:00 +00001464
1465 // Tag object if requested.
1466 if ((flags & TAG_OBJECT) != 0) {
1467 add(result, result, Operand(kHeapObjectTag));
1468 }
1469}
1470
1471
1472void MacroAssembler::UndoAllocationInNewSpace(Register object,
1473 Register scratch) {
1474 ExternalReference new_space_allocation_top =
Steve Block44f0eee2011-05-26 01:26:41 +01001475 ExternalReference::new_space_allocation_top_address(isolate());
Steve Blocka7e24c12009-10-30 11:49:00 +00001476
1477 // Make sure the object has no tag before resetting top.
1478 and_(object, object, Operand(~kHeapObjectTagMask));
1479#ifdef DEBUG
1480 // Check that the object being un-allocated is below the current top.
1481 mov(scratch, Operand(new_space_allocation_top));
1482 ldr(scratch, MemOperand(scratch));
1483 cmp(object, scratch);
1484 Check(lt, "Undo allocation of non-allocated memory");
1485#endif
1486 // Write the address of the object to un-allocate as the current top.
1487 mov(scratch, Operand(new_space_allocation_top));
1488 str(object, MemOperand(scratch));
1489}
1490
1491
Andrei Popescu31002712010-02-23 13:46:05 +00001492void MacroAssembler::AllocateTwoByteString(Register result,
1493 Register length,
1494 Register scratch1,
1495 Register scratch2,
1496 Register scratch3,
1497 Label* gc_required) {
1498 // Calculate the number of bytes needed for the characters in the string while
1499 // observing object alignment.
1500 ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1501 mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars.
1502 add(scratch1, scratch1,
1503 Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
Kristian Monsen25f61362010-05-21 11:50:48 +01001504 and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
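// The add/and pair rounds the size up to the object alignment: assuming
// kObjectAlignmentMask == 3 (illustrative for a 32-bit target), a 13-byte
// payload becomes (13 + 3) & ~3 = 16 bytes.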
Andrei Popescu31002712010-02-23 13:46:05 +00001505
1506 // Allocate two-byte string in new space.
1507 AllocateInNewSpace(scratch1,
1508 result,
1509 scratch2,
1510 scratch3,
1511 gc_required,
1512 TAG_OBJECT);
1513
1514 // Set the map, length and hash field.
Steve Block6ded16b2010-05-10 14:33:55 +01001515 InitializeNewString(result,
1516 length,
1517 Heap::kStringMapRootIndex,
1518 scratch1,
1519 scratch2);
Andrei Popescu31002712010-02-23 13:46:05 +00001520}
1521
1522
1523void MacroAssembler::AllocateAsciiString(Register result,
1524 Register length,
1525 Register scratch1,
1526 Register scratch2,
1527 Register scratch3,
1528 Label* gc_required) {
1529 // Calculate the number of bytes needed for the characters in the string while
1530 // observing object alignment.
1531 ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
1532 ASSERT(kCharSize == 1);
1533 add(scratch1, length,
1534 Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize));
Kristian Monsen25f61362010-05-21 11:50:48 +01001535 and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
Andrei Popescu31002712010-02-23 13:46:05 +00001536
1537 // Allocate ASCII string in new space.
1538 AllocateInNewSpace(scratch1,
1539 result,
1540 scratch2,
1541 scratch3,
1542 gc_required,
1543 TAG_OBJECT);
1544
1545 // Set the map, length and hash field.
Steve Block6ded16b2010-05-10 14:33:55 +01001546 InitializeNewString(result,
1547 length,
1548 Heap::kAsciiStringMapRootIndex,
1549 scratch1,
1550 scratch2);
Andrei Popescu31002712010-02-23 13:46:05 +00001551}
1552
1553
1554void MacroAssembler::AllocateTwoByteConsString(Register result,
1555 Register length,
1556 Register scratch1,
1557 Register scratch2,
1558 Label* gc_required) {
Kristian Monsen25f61362010-05-21 11:50:48 +01001559 AllocateInNewSpace(ConsString::kSize,
Andrei Popescu31002712010-02-23 13:46:05 +00001560 result,
1561 scratch1,
1562 scratch2,
1563 gc_required,
1564 TAG_OBJECT);
Steve Block6ded16b2010-05-10 14:33:55 +01001565
1566 InitializeNewString(result,
1567 length,
1568 Heap::kConsStringMapRootIndex,
1569 scratch1,
1570 scratch2);
Andrei Popescu31002712010-02-23 13:46:05 +00001571}
1572
1573
1574void MacroAssembler::AllocateAsciiConsString(Register result,
1575 Register length,
1576 Register scratch1,
1577 Register scratch2,
1578 Label* gc_required) {
Kristian Monsen25f61362010-05-21 11:50:48 +01001579 AllocateInNewSpace(ConsString::kSize,
Andrei Popescu31002712010-02-23 13:46:05 +00001580 result,
1581 scratch1,
1582 scratch2,
1583 gc_required,
1584 TAG_OBJECT);
Steve Block6ded16b2010-05-10 14:33:55 +01001585
1586 InitializeNewString(result,
1587 length,
1588 Heap::kConsAsciiStringMapRootIndex,
1589 scratch1,
1590 scratch2);
Andrei Popescu31002712010-02-23 13:46:05 +00001591}
1592
1593
Steve Block6ded16b2010-05-10 14:33:55 +01001594void MacroAssembler::CompareObjectType(Register object,
Steve Blocka7e24c12009-10-30 11:49:00 +00001595 Register map,
1596 Register type_reg,
1597 InstanceType type) {
Steve Block6ded16b2010-05-10 14:33:55 +01001598 ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
Steve Blocka7e24c12009-10-30 11:49:00 +00001599 CompareInstanceType(map, type_reg, type);
1600}
1601
1602
1603void MacroAssembler::CompareInstanceType(Register map,
1604 Register type_reg,
1605 InstanceType type) {
1606 ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1607 cmp(type_reg, Operand(type));
1608}
1609
1610
Ben Murdoche0cee9b2011-05-25 10:26:03 +01001611void MacroAssembler::CompareRoot(Register obj,
1612 Heap::RootListIndex index) {
1613 ASSERT(!obj.is(ip));
1614 LoadRoot(ip, index);
1615 cmp(obj, ip);
1616}
1617
1618
Andrei Popescu31002712010-02-23 13:46:05 +00001619void MacroAssembler::CheckMap(Register obj,
1620 Register scratch,
1621 Handle<Map> map,
1622 Label* fail,
1623 bool is_heap_object) {
1624 if (!is_heap_object) {
Steve Block1e0659c2011-05-24 12:43:12 +01001625 JumpIfSmi(obj, fail);
Andrei Popescu31002712010-02-23 13:46:05 +00001626 }
1627 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
1628 mov(ip, Operand(map));
1629 cmp(scratch, ip);
1630 b(ne, fail);
1631}
1632
1633
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001634void MacroAssembler::CheckMap(Register obj,
1635 Register scratch,
1636 Heap::RootListIndex index,
1637 Label* fail,
1638 bool is_heap_object) {
1639 if (!is_heap_object) {
Steve Block1e0659c2011-05-24 12:43:12 +01001640 JumpIfSmi(obj, fail);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001641 }
1642 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
1643 LoadRoot(ip, index);
1644 cmp(scratch, ip);
1645 b(ne, fail);
1646}
1647
1648
Steve Blocka7e24c12009-10-30 11:49:00 +00001649void MacroAssembler::TryGetFunctionPrototype(Register function,
1650 Register result,
1651 Register scratch,
1652 Label* miss) {
1653 // Check that the receiver isn't a smi.
Steve Block1e0659c2011-05-24 12:43:12 +01001654 JumpIfSmi(function, miss);
Steve Blocka7e24c12009-10-30 11:49:00 +00001655
1656 // Check that the function really is a function. Load map into result reg.
1657 CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
1658 b(ne, miss);
1659
1660 // Make sure that the function has an instance prototype.
1661 Label non_instance;
1662 ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
1663 tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
1664 b(ne, &non_instance);
1665
1666 // Get the prototype or initial map from the function.
1667 ldr(result,
1668 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
1669
1670 // If the prototype or initial map is the hole, don't return it and
1671 // simply miss the cache instead. This will allow us to allocate a
1672 // prototype object on-demand in the runtime system.
1673 LoadRoot(ip, Heap::kTheHoleValueRootIndex);
1674 cmp(result, ip);
1675 b(eq, miss);
1676
1677 // If the function does not have an initial map, we're done.
1678 Label done;
1679 CompareObjectType(result, scratch, scratch, MAP_TYPE);
1680 b(ne, &done);
1681
1682 // Get the prototype from the initial map.
1683 ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
1684 jmp(&done);
1685
1686 // Non-instance prototype: Fetch prototype from constructor field
1687 // in initial map.
1688 bind(&non_instance);
1689 ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
1690
1691 // All done.
1692 bind(&done);
1693}
1694
1695
1696void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001697 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Steve Blocka7e24c12009-10-30 11:49:00 +00001698 Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
1699}
1700
1701
Andrei Popescu31002712010-02-23 13:46:05 +00001702void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
Steve Block1e0659c2011-05-24 12:43:12 +01001703 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Andrei Popescu31002712010-02-23 13:46:05 +00001704 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
1705}
1706
1707
Steve Block1e0659c2011-05-24 12:43:12 +01001708MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) {
1709 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
1710 Object* result;
1711 { MaybeObject* maybe_result = stub->TryGetCode();
1712 if (!maybe_result->ToObject(&result)) return maybe_result;
1713 }
1714 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
1715 return result;
1716}
1717
1718
1719static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
1720 return ref0.address() - ref1.address();
1721}
1722
1723
1724MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
Ben Murdoche0cee9b2011-05-25 10:26:03 +01001725 ExternalReference function, int stack_space) {
Steve Block1e0659c2011-05-24 12:43:12 +01001726 ExternalReference next_address =
1727 ExternalReference::handle_scope_next_address();
1728 const int kNextOffset = 0;
1729 const int kLimitOffset = AddressOffset(
1730 ExternalReference::handle_scope_limit_address(),
1731 next_address);
1732 const int kLevelOffset = AddressOffset(
1733 ExternalReference::handle_scope_level_address(),
1734 next_address);
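// These offsets are byte deltas from next_address, letting all three
// HandleScope fields be addressed off the single base register loaded below
// (r7). Assuming the usual contiguous {next, limit, level} layout, they
// would come out to 0, 4 and 8 on 32-bit ARM; AddressOffset computes
// whatever the real layout dictates.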
1735
1736 // Allocate HandleScope in callee-save registers.
1737 mov(r7, Operand(next_address));
1738 ldr(r4, MemOperand(r7, kNextOffset));
1739 ldr(r5, MemOperand(r7, kLimitOffset));
1740 ldr(r6, MemOperand(r7, kLevelOffset));
1741 add(r6, r6, Operand(1));
1742 str(r6, MemOperand(r7, kLevelOffset));
1743
1744 // Native call returns to the DirectCEntry stub which redirects to the
1745 // return address pushed on stack (could have moved after GC).
1746 // DirectCEntry stub itself is generated early and never moves.
1747 DirectCEntryStub stub;
1748 stub.GenerateCall(this, function);
1749
1750 Label promote_scheduled_exception;
1751 Label delete_allocated_handles;
1752 Label leave_exit_frame;
1753
1754 // If the result is non-zero, dereference it to get the result value;
1755 // otherwise set it to undefined.
1756 cmp(r0, Operand(0));
1757 LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
1758 ldr(r0, MemOperand(r0), ne);
1759
1760 // No more valid handles (the result handle was the last one). Restore
1761 // previous handle scope.
1762 str(r4, MemOperand(r7, kNextOffset));
Steve Block44f0eee2011-05-26 01:26:41 +01001763 if (emit_debug_code()) {
Steve Block1e0659c2011-05-24 12:43:12 +01001764 ldr(r1, MemOperand(r7, kLevelOffset));
1765 cmp(r1, r6);
1766 Check(eq, "Unexpected level after return from api call");
1767 }
1768 sub(r6, r6, Operand(1));
1769 str(r6, MemOperand(r7, kLevelOffset));
1770 ldr(ip, MemOperand(r7, kLimitOffset));
1771 cmp(r5, ip);
1772 b(ne, &delete_allocated_handles);
1773
1774 // Check if the function scheduled an exception.
1775 bind(&leave_exit_frame);
1776 LoadRoot(r4, Heap::kTheHoleValueRootIndex);
Steve Block44f0eee2011-05-26 01:26:41 +01001777 mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate())));
Steve Block1e0659c2011-05-24 12:43:12 +01001778 ldr(r5, MemOperand(ip));
1779 cmp(r4, r5);
1780 b(ne, &promote_scheduled_exception);
1781
Ben Murdoche0cee9b2011-05-25 10:26:03 +01001782 // LeaveExitFrame expects unwind space to be in a register.
Steve Block1e0659c2011-05-24 12:43:12 +01001783 mov(r4, Operand(stack_space));
Ben Murdoche0cee9b2011-05-25 10:26:03 +01001784 LeaveExitFrame(false, r4);
1785 mov(pc, lr);
Steve Block1e0659c2011-05-24 12:43:12 +01001786
1787 bind(&promote_scheduled_exception);
Steve Block44f0eee2011-05-26 01:26:41 +01001788 MaybeObject* result
1789 = TryTailCallExternalReference(
1790 ExternalReference(Runtime::kPromoteScheduledException, isolate()),
1791 0,
1792 1);
Steve Block1e0659c2011-05-24 12:43:12 +01001793 if (result->IsFailure()) {
1794 return result;
1795 }
1796
1797 // HandleScope limit has changed. Delete allocated extensions.
1798 bind(&delete_allocated_handles);
1799 str(r5, MemOperand(r7, kLimitOffset));
1800 mov(r4, r0);
Ben Murdoch8b112d22011-06-08 16:22:53 +01001801 PrepareCallCFunction(1, r5);
1802 mov(r0, Operand(ExternalReference::isolate_address()));
Steve Block44f0eee2011-05-26 01:26:41 +01001803 CallCFunction(
Ben Murdoch8b112d22011-06-08 16:22:53 +01001804 ExternalReference::delete_handle_scope_extensions(isolate()), 1);
Steve Block1e0659c2011-05-24 12:43:12 +01001805 mov(r0, r4);
1806 jmp(&leave_exit_frame);
1807
1808 return result;
1809}
1810
1811
Steve Blocka7e24c12009-10-30 11:49:00 +00001812void MacroAssembler::IllegalOperation(int num_arguments) {
1813 if (num_arguments > 0) {
1814 add(sp, sp, Operand(num_arguments * kPointerSize));
1815 }
1816 LoadRoot(r0, Heap::kUndefinedValueRootIndex);
1817}
1818
1819
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001820void MacroAssembler::IndexFromHash(Register hash, Register index) {
1821 // If the hash field contains an array index, pick it out. The assert checks
1822 // that the constants for the maximum number of digits for an array index
1823 // cached in the hash field and the number of bits reserved for it do not
1824 // conflict.
1825 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
1826 (1 << String::kArrayIndexValueBits));
1827 // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
1828 // the low kHashShift bits.
1829 STATIC_ASSERT(kSmiTag == 0);
1830 Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
1831 mov(index, Operand(hash, LSL, kSmiTagSize));
1832}
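// Illustrative values: if Ubfx extracts the raw index 5 from the hash field,
// the final mov produces 5 << kSmiTagSize = 0b1010, i.e. the 32-bit smi
// encoding of 5.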
1833
1834
Steve Blockd0582a62009-12-15 09:54:21 +00001835void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
1836 Register outHighReg,
1837 Register outLowReg) {
1838 // ARMv7 VFP3 instructions to implement integer to double conversion.
1839 mov(r7, Operand(inReg, ASR, kSmiTagSize));
Leon Clarkee46be812010-01-19 14:06:41 +00001840 vmov(s15, r7);
Steve Block6ded16b2010-05-10 14:33:55 +01001841 vcvt_f64_s32(d7, s15);
Leon Clarkee46be812010-01-19 14:06:41 +00001842 vmov(outLowReg, outHighReg, d7);
Steve Blockd0582a62009-12-15 09:54:21 +00001843}
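// Worked example (illustrative): the smi word for 5 is 0b1010; ASR by
// kSmiTagSize recovers 5, vcvt_f64_s32 yields 5.0 = 0x40140000'00000000, and
// the final vmov leaves 0x00000000 in outLowReg and 0x40140000 in outHighReg
// (the low and high words of the IEEE-754 double).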
1844
1845
Steve Block8defd9f2010-07-08 12:39:36 +01001846void MacroAssembler::ObjectToDoubleVFPRegister(Register object,
1847 DwVfpRegister result,
1848 Register scratch1,
1849 Register scratch2,
1850 Register heap_number_map,
1851 SwVfpRegister scratch3,
1852 Label* not_number,
1853 ObjectToDoubleFlags flags) {
1854 Label done;
1855 if ((flags & OBJECT_NOT_SMI) == 0) {
1856 Label not_smi;
Steve Block1e0659c2011-05-24 12:43:12 +01001857 JumpIfNotSmi(object, &not_smi);
Steve Block8defd9f2010-07-08 12:39:36 +01001858 // Remove smi tag and convert to double.
1859 mov(scratch1, Operand(object, ASR, kSmiTagSize));
1860 vmov(scratch3, scratch1);
1861 vcvt_f64_s32(result, scratch3);
1862 b(&done);
1863 bind(&not_smi);
1864 }
1865 // Check for heap number and load double value from it.
1866 ldr(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
1867 sub(scratch2, object, Operand(kHeapObjectTag));
1868 cmp(scratch1, heap_number_map);
1869 b(ne, not_number);
1870 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
1871 // If exponent is all ones the number is either a NaN or +/-Infinity.
1872 ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
1873 Sbfx(scratch1,
1874 scratch1,
1875 HeapNumber::kExponentShift,
1876 HeapNumber::kExponentBits);
1877 // An all-ones value sign-extends to -1.
1878 cmp(scratch1, Operand(-1));
1879 b(eq, not_number);
1880 }
1881 vldr(result, scratch2, HeapNumber::kValueOffset);
1882 bind(&done);
1883}
1884
1885
1886void MacroAssembler::SmiToDoubleVFPRegister(Register smi,
1887 DwVfpRegister value,
1888 Register scratch1,
1889 SwVfpRegister scratch2) {
1890 mov(scratch1, Operand(smi, ASR, kSmiTagSize));
1891 vmov(scratch2, scratch1);
1892 vcvt_f64_s32(value, scratch2);
1893}
1894
1895
Iain Merrick9ac36c92010-09-13 15:29:50 +01001896// Tries to get a signed int32 out of a double-precision floating-point heap
1897// number. Rounds towards 0. Branches to 'not_int32' if the double is out of
1898// the 32-bit signed integer range.
1899void MacroAssembler::ConvertToInt32(Register source,
1900 Register dest,
1901 Register scratch,
1902 Register scratch2,
Steve Block1e0659c2011-05-24 12:43:12 +01001903 DwVfpRegister double_scratch,
Iain Merrick9ac36c92010-09-13 15:29:50 +01001904 Label *not_int32) {
Ben Murdoch8b112d22011-06-08 16:22:53 +01001905 if (CpuFeatures::IsSupported(VFP3)) {
Iain Merrick9ac36c92010-09-13 15:29:50 +01001906 CpuFeatures::Scope scope(VFP3);
1907 sub(scratch, source, Operand(kHeapObjectTag));
Steve Block1e0659c2011-05-24 12:43:12 +01001908 vldr(double_scratch, scratch, HeapNumber::kValueOffset);
1909 vcvt_s32_f64(double_scratch.low(), double_scratch);
1910 vmov(dest, double_scratch.low());
Iain Merrick9ac36c92010-09-13 15:29:50 +01001911 // The signed vcvt instruction saturates to the minimum (0x80000000) or
1912 // maximum (0x7fffffff) signed 32-bit integer when the double is out of
1913 // range. When subtracting one, the minimum signed integer becomes the
1914 // maximum signed integer.
1915 sub(scratch, dest, Operand(1));
1916 cmp(scratch, Operand(LONG_MAX - 1));
1917 // If equal then dest was LONG_MAX, if greater dest was LONG_MIN.
1918 b(ge, not_int32);
1919 } else {
1920 // This code is faster for doubles that are in the ranges -0x7fffffff to
1921 // -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds almost to
1922 // the range of signed int32 values that are not Smis. Jumps to the label
1923 // 'not_int32' if the double isn't in the range -0x80000000.0 to
1924 // 0x80000000.0 (excluding the endpoints).
1925 Label right_exponent, done;
1926 // Get exponent word.
1927 ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
1928 // Get exponent alone in scratch2.
1929 Ubfx(scratch2,
Ben Murdoche0cee9b2011-05-25 10:26:03 +01001930 scratch,
1931 HeapNumber::kExponentShift,
1932 HeapNumber::kExponentBits);
Iain Merrick9ac36c92010-09-13 15:29:50 +01001933 // Load dest with zero. We use this either for the final shift or
1934 // for the answer.
1935 mov(dest, Operand(0, RelocInfo::NONE));
1936 // Check whether the exponent matches a 32-bit signed int that is not a Smi.
1937 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
1938 // the exponent that we are fastest at and also the highest exponent we can
1939 // handle here.
1940 const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
1941 // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
1942 // split it up to avoid a constant pool entry. You can't do that in general
1943 // for cmp because of the overflow flag, but we know the exponent is in the
1944 // range 0-2047 so there is no overflow.
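// Worked numbers: non_smi_exponent = 0x3ff + 30 = 0x41d, whose set bits span
// 11 bits and thus do not fit ARM's 8-bit rotated immediate. After
// subtracting the fudge factor 0x400 from both sides, the cmp operand
// becomes 0x1d, and both 0x400 and 0x1d encode directly.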
1945 int fudge_factor = 0x400;
1946 sub(scratch2, scratch2, Operand(fudge_factor));
1947 cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
1948 // If we have a match of the int32-but-not-Smi exponent then skip some
1949 // logic.
1950 b(eq, &right_exponent);
1951 // If the exponent is higher than that then go to slow case. This catches
1952 // numbers that don't fit in a signed int32, infinities and NaNs.
1953 b(gt, not_int32);
1954
1955 // We know the exponent is smaller than 30 (biased). If it is less than
1956 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
1957 // it rounds to zero.
1958 const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
1959 sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
1960 // Dest already has a Smi zero.
1961 b(lt, &done);
1962
1963 // We have an exponent between 0 and 30 in scratch2. Subtract from 30 to
1964 // get how much to shift down.
1965 rsb(dest, scratch2, Operand(30));
1966
1967 bind(&right_exponent);
1968 // Get the top bits of the mantissa.
1969 and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
1970 // Put back the implicit 1.
1971 orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
1972 // Shift up the mantissa bits to take up the space the exponent used to
1973 // take. We just ORed in the implicit bit, which took care of one bit, and
1974 // we want to leave the sign bit 0, so we subtract 2 bits from the shift
1975 // distance.
1976 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
1977 mov(scratch2, Operand(scratch2, LSL, shift_distance));
1978 // Put sign in zero flag.
1979 tst(scratch, Operand(HeapNumber::kSignMask));
1980 // Get the second half of the double. For some exponents we don't
1981 // actually need this because the bits get shifted out again, but
1982 // it's probably slower to test than just to do it.
1983 ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
1984 // Shift down 22 bits to get the last 10 bits.
1985 orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
1986 // Move down according to the exponent.
1987 mov(dest, Operand(scratch, LSR, dest));
1988 // Fix sign if sign bit was set.
1989 rsb(dest, dest, Operand(0, RelocInfo::NONE), LeaveCC, ne);
1990 bind(&done);
1991 }
1992}
1993
1994
Ben Murdoche0cee9b2011-05-25 10:26:03 +01001995void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
1996 SwVfpRegister result,
1997 DwVfpRegister double_input,
1998 Register scratch1,
1999 Register scratch2,
2000 CheckForInexactConversion check_inexact) {
Ben Murdoch8b112d22011-06-08 16:22:53 +01002001 ASSERT(CpuFeatures::IsSupported(VFP3));
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002002 CpuFeatures::Scope scope(VFP3);
2003 Register prev_fpscr = scratch1;
2004 Register scratch = scratch2;
2005
2006 int32_t check_inexact_conversion =
2007 (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0;
2008
2009 // Set custom FPSCR:
2010 // - Set the rounding mode.
2011 // - Clear the VFP cumulative exception flags.
2012 // - Make sure the flush-to-zero mode control bit is unset.
2013 vmrs(prev_fpscr);
2014 bic(scratch,
2015 prev_fpscr,
2016 Operand(kVFPExceptionMask |
2017 check_inexact_conversion |
2018 kVFPRoundingModeMask |
2019 kVFPFlushToZeroMask));
2020 // 'Round To Nearest' is encoded by 0b00 so no bits need to be set.
2021 if (rounding_mode != kRoundToNearest) {
2022 orr(scratch, scratch, Operand(rounding_mode));
2023 }
2024 vmsr(scratch);
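// For reference, per the ARM FPSCR layout: RMode occupies bits [23:22]
// (RN=0b00, RP=0b01, RM=0b10, RZ=0b11), flush-to-zero is bit 24, and the
// cumulative exception flags sit in bits [4:0]. The VFPRoundingMode enum
// values are assumed to be pre-shifted into bits [23:22], which is why the
// orr above needs no extra shift.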
2025
2026 // Convert the argument to an integer.
2027 vcvt_s32_f64(result,
2028 double_input,
2029 (rounding_mode == kRoundToZero) ? kDefaultRoundToZero
2030 : kFPSCRRounding);
2031
2032 // Retrieve FPSCR.
2033 vmrs(scratch);
2034 // Restore FPSCR.
2035 vmsr(prev_fpscr);
2036 // Check for vfp exceptions.
2037 tst(scratch, Operand(kVFPExceptionMask | check_inexact_conversion));
2038}
2039
2040
Steve Block44f0eee2011-05-26 01:26:41 +01002041void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
2042 Register input_high,
2043 Register input_low,
2044 Register scratch) {
2045 Label done, normal_exponent, restore_sign;
2046
2047 // Extract the biased exponent in result.
2048 Ubfx(result,
2049 input_high,
2050 HeapNumber::kExponentShift,
2051 HeapNumber::kExponentBits);
2052
2053 // Check for Infinity and NaNs, which should return 0.
2054 cmp(result, Operand(HeapNumber::kExponentMask));
2055 mov(result, Operand(0), LeaveCC, eq);
2056 b(eq, &done);
2057
2058 // Express exponent as delta to (number of mantissa bits + 31).
2059 sub(result,
2060 result,
2061 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31),
2062 SetCC);
2063
2064 // If the delta is strictly positive, all bits would be shifted away,
2065 // which means that we can return 0.
2066 b(le, &normal_exponent);
2067 mov(result, Operand(0));
2068 b(&done);
2069
2070 bind(&normal_exponent);
2071 const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
2072 // Calculate shift.
2073 add(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits), SetCC);
2074
2075 // Save the sign.
2076 Register sign = result;
2077 result = no_reg;
2078 and_(sign, input_high, Operand(HeapNumber::kSignMask));
2079
2080 // Set the implicit 1 before the mantissa part in input_high.
2081 orr(input_high,
2082 input_high,
2083 Operand(1 << HeapNumber::kMantissaBitsInTopWord));
2084 // Shift the mantissa bits to the correct position.
2085 // We don't need to clear non-mantissa bits as they will be shifted away.
2086 // If they weren't, it would mean that the answer is in the 32-bit range.
2087 mov(input_high, Operand(input_high, LSL, scratch));
2088
2089 // Replace the shifted bits with bits from the lower mantissa word.
2090 Label pos_shift, shift_done;
2091 rsb(scratch, scratch, Operand(32), SetCC);
2092 b(&pos_shift, ge);
2093
2094 // Negate scratch.
2095 rsb(scratch, scratch, Operand(0));
2096 mov(input_low, Operand(input_low, LSL, scratch));
2097 b(&shift_done);
2098
2099 bind(&pos_shift);
2100 mov(input_low, Operand(input_low, LSR, scratch));
2101
2102 bind(&shift_done);
2103 orr(input_high, input_high, Operand(input_low));
2104 // Restore sign if necessary.
2105 cmp(sign, Operand(0));
2106 result = sign;
2107 sign = no_reg;
2108 rsb(result, input_high, Operand(0), LeaveCC, ne);
2109 mov(result, input_high, LeaveCC, eq);
2110 bind(&done);
2111}
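// Worked example (illustrative): for 2^31 (= 2147483648.0, biased exponent
// 0x41e) the delta is 1054 - 1106 = -52, the computed shift is 11, and
// input_high = 0x41f00000 << 11 = 0x80000000 with input_low contributing
// nothing, so the routine returns 0x80000000, which is exactly
// ECMA-262 ToInt32(2^31) = -2147483648.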
2112
2113
2114void MacroAssembler::EmitECMATruncate(Register result,
2115 DwVfpRegister double_input,
2116 SwVfpRegister single_scratch,
2117 Register scratch,
2118 Register input_high,
2119 Register input_low) {
2120 CpuFeatures::Scope scope(VFP3);
2121 ASSERT(!input_high.is(result));
2122 ASSERT(!input_low.is(result));
2123 ASSERT(!input_low.is(input_high));
2124 ASSERT(!scratch.is(result) &&
2125 !scratch.is(input_high) &&
2126 !scratch.is(input_low));
2127 ASSERT(!single_scratch.is(double_input.low()) &&
2128 !single_scratch.is(double_input.high()));
2129
2130 Label done;
2131
2132 // Clear cumulative exception flags.
2133 ClearFPSCRBits(kVFPExceptionMask, scratch);
2134 // Try a conversion to a signed integer.
2135 vcvt_s32_f64(single_scratch, double_input);
2136 vmov(result, single_scratch);
2137 // Retrieve the FPSCR.
2138 vmrs(scratch);
2139 // Check for overflow and NaNs.
2140 tst(scratch, Operand(kVFPOverflowExceptionBit |
2141 kVFPUnderflowExceptionBit |
2142 kVFPInvalidOpExceptionBit));
2143 // If we had no exceptions we are done.
2144 b(eq, &done);
2145
2146 // Load the double value and perform a manual truncation.
2147 vmov(input_low, input_high, double_input);
2148 EmitOutOfInt32RangeTruncate(result,
2149 input_high,
2150 input_low,
2151 scratch);
2152 bind(&done);
2153}
2154
2155
Andrei Popescu31002712010-02-23 13:46:05 +00002156void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2157 Register src,
2158 int num_least_bits) {
Ben Murdoch8b112d22011-06-08 16:22:53 +01002159 if (CpuFeatures::IsSupported(ARMv7)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002160 ubfx(dst, src, kSmiTagSize, num_least_bits);
Andrei Popescu31002712010-02-23 13:46:05 +00002161 } else {
2162 mov(dst, Operand(src, ASR, kSmiTagSize));
2163 and_(dst, dst, Operand((1 << num_least_bits) - 1));
2164 }
2165}
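// Illustrative values: for the smi encoding of 13 (word 0b11010) and
// num_least_bits == 3, ubfx extracts bits [3:1] = 0b101, i.e. 13 & 7 = 5;
// the non-ARMv7 path computes the same result via ASR and a mask.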
2166
2167
Steve Block1e0659c2011-05-24 12:43:12 +01002168void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2169 Register src,
2170 int num_least_bits) {
2171 and_(dst, src, Operand((1 << num_least_bits) - 1));
2172}
2173
2174
Steve Block44f0eee2011-05-26 01:26:41 +01002175void MacroAssembler::CallRuntime(const Runtime::Function* f,
2176 int num_arguments) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002177 // All parameters are on the stack. r0 has the return value after call.
2178
2179 // If the expected number of arguments of the runtime function is
2180 // constant, we check that the actual number of arguments match the
2181 // expectation.
2182 if (f->nargs >= 0 && f->nargs != num_arguments) {
2183 IllegalOperation(num_arguments);
2184 return;
2185 }
2186
Leon Clarke4515c472010-02-03 11:58:03 +00002187 // TODO(1236192): Most runtime routines don't need the number of
2188 // arguments passed in because it is constant. At some point we
2189 // should remove this need and make the runtime routine entry code
2190 // smarter.
2191 mov(r0, Operand(num_arguments));
Steve Block44f0eee2011-05-26 01:26:41 +01002192 mov(r1, Operand(ExternalReference(f, isolate())));
Leon Clarke4515c472010-02-03 11:58:03 +00002193 CEntryStub stub(1);
Steve Blocka7e24c12009-10-30 11:49:00 +00002194 CallStub(&stub);
2195}
2196
2197
2198void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
2199 CallRuntime(Runtime::FunctionForId(fid), num_arguments);
2200}
2201
2202
Ben Murdochb0fe1622011-05-05 13:52:32 +01002203void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
Steve Block44f0eee2011-05-26 01:26:41 +01002204 const Runtime::Function* function = Runtime::FunctionForId(id);
Ben Murdochb0fe1622011-05-05 13:52:32 +01002205 mov(r0, Operand(function->nargs));
Steve Block44f0eee2011-05-26 01:26:41 +01002206 mov(r1, Operand(ExternalReference(function, isolate())));
Ben Murdochb0fe1622011-05-05 13:52:32 +01002207 CEntryStub stub(1);
2208 stub.SaveDoubles();
2209 CallStub(&stub);
2210}
2211
2212
Andrei Popescu402d9372010-02-26 13:31:12 +00002213void MacroAssembler::CallExternalReference(const ExternalReference& ext,
2214 int num_arguments) {
2215 mov(r0, Operand(num_arguments));
2216 mov(r1, Operand(ext));
2217
2218 CEntryStub stub(1);
2219 CallStub(&stub);
2220}
2221
2222
Steve Block6ded16b2010-05-10 14:33:55 +01002223void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
2224 int num_arguments,
2225 int result_size) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002226 // TODO(1236192): Most runtime routines don't need the number of
2227 // arguments passed in because it is constant. At some point we
2228 // should remove this need and make the runtime routine entry code
2229 // smarter.
2230 mov(r0, Operand(num_arguments));
Steve Block6ded16b2010-05-10 14:33:55 +01002231 JumpToExternalReference(ext);
Steve Blocka7e24c12009-10-30 11:49:00 +00002232}
2233
2234
Steve Block1e0659c2011-05-24 12:43:12 +01002235MaybeObject* MacroAssembler::TryTailCallExternalReference(
2236 const ExternalReference& ext, int num_arguments, int result_size) {
2237 // TODO(1236192): Most runtime routines don't need the number of
2238 // arguments passed in because it is constant. At some point we
2239 // should remove this need and make the runtime routine entry code
2240 // smarter.
2241 mov(r0, Operand(num_arguments));
2242 return TryJumpToExternalReference(ext);
2243}
2244
2245
Steve Block6ded16b2010-05-10 14:33:55 +01002246void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
2247 int num_arguments,
2248 int result_size) {
Steve Block44f0eee2011-05-26 01:26:41 +01002249 TailCallExternalReference(ExternalReference(fid, isolate()),
2250 num_arguments,
2251 result_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002252}
2253
2254
2255void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002256#if defined(__thumb__)
2257 // Thumb mode builtin.
2258 ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
2259#endif
2260 mov(r1, Operand(builtin));
2261 CEntryStub stub(1);
2262 Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2263}
2264
2265
Steve Block1e0659c2011-05-24 12:43:12 +01002266MaybeObject* MacroAssembler::TryJumpToExternalReference(
2267 const ExternalReference& builtin) {
2268#if defined(__thumb__)
2269 // Thumb mode builtin.
2270 ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
2271#endif
2272 mov(r1, Operand(builtin));
2273 CEntryStub stub(1);
2274 return TryTailCallStub(&stub);
2275}
2276
2277
Steve Blocka7e24c12009-10-30 11:49:00 +00002278void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
Ben Murdochb8e0da22011-05-16 14:20:40 +01002279 InvokeJSFlags flags,
Steve Block44f0eee2011-05-26 01:26:41 +01002280 CallWrapper* call_wrapper) {
Andrei Popescu402d9372010-02-26 13:31:12 +00002281 GetBuiltinEntry(r2, id);
Steve Blocka7e24c12009-10-30 11:49:00 +00002282 if (flags == CALL_JS) {
Steve Block44f0eee2011-05-26 01:26:41 +01002283 if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(r2));
Andrei Popescu402d9372010-02-26 13:31:12 +00002284 Call(r2);
Steve Block44f0eee2011-05-26 01:26:41 +01002285 if (call_wrapper != NULL) call_wrapper->AfterCall();
Steve Blocka7e24c12009-10-30 11:49:00 +00002286 } else {
2287 ASSERT(flags == JUMP_JS);
Andrei Popescu402d9372010-02-26 13:31:12 +00002288 Jump(r2);
Steve Blocka7e24c12009-10-30 11:49:00 +00002289 }
2290}
2291
2292
Steve Block791712a2010-08-27 10:21:07 +01002293void MacroAssembler::GetBuiltinFunction(Register target,
2294 Builtins::JavaScript id) {
Steve Block6ded16b2010-05-10 14:33:55 +01002295 // Load the builtins object into target register.
2296 ldr(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
2297 ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
Andrei Popescu402d9372010-02-26 13:31:12 +00002298 // Load the JavaScript builtin function from the builtins object.
Steve Block6ded16b2010-05-10 14:33:55 +01002299 ldr(target, FieldMemOperand(target,
Steve Block791712a2010-08-27 10:21:07 +01002300 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
2301}
2302
2303
2304void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
2305 ASSERT(!target.is(r1));
2306 GetBuiltinFunction(r1, id);
2307 // Load the code entry point from the builtins object.
2308 ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
Steve Blocka7e24c12009-10-30 11:49:00 +00002309}
2310
2311
2312void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2313 Register scratch1, Register scratch2) {
2314 if (FLAG_native_code_counters && counter->Enabled()) {
2315 mov(scratch1, Operand(value));
2316 mov(scratch2, Operand(ExternalReference(counter)));
2317 str(scratch1, MemOperand(scratch2));
2318 }
2319}
2320
2321
2322void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2323 Register scratch1, Register scratch2) {
2324 ASSERT(value > 0);
2325 if (FLAG_native_code_counters && counter->Enabled()) {
2326 mov(scratch2, Operand(ExternalReference(counter)));
2327 ldr(scratch1, MemOperand(scratch2));
2328 add(scratch1, scratch1, Operand(value));
2329 str(scratch1, MemOperand(scratch2));
2330 }
2331}
2332
2333
2334void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2335 Register scratch1, Register scratch2) {
2336 ASSERT(value > 0);
2337 if (FLAG_native_code_counters && counter->Enabled()) {
2338 mov(scratch2, Operand(ExternalReference(counter)));
2339 ldr(scratch1, MemOperand(scratch2));
2340 sub(scratch1, scratch1, Operand(value));
2341 str(scratch1, MemOperand(scratch2));
2342 }
2343}
2344
2345
Steve Block1e0659c2011-05-24 12:43:12 +01002346void MacroAssembler::Assert(Condition cond, const char* msg) {
Steve Block44f0eee2011-05-26 01:26:41 +01002347 if (emit_debug_code())
Steve Block1e0659c2011-05-24 12:43:12 +01002348 Check(cond, msg);
Steve Blocka7e24c12009-10-30 11:49:00 +00002349}
2350
2351
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002352void MacroAssembler::AssertRegisterIsRoot(Register reg,
2353 Heap::RootListIndex index) {
Steve Block44f0eee2011-05-26 01:26:41 +01002354 if (emit_debug_code()) {
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002355 LoadRoot(ip, index);
2356 cmp(reg, ip);
2357 Check(eq, "Register did not match expected root");
2358 }
2359}
2360
2361
Iain Merrick75681382010-08-19 15:07:18 +01002362void MacroAssembler::AssertFastElements(Register elements) {
Steve Block44f0eee2011-05-26 01:26:41 +01002363 if (emit_debug_code()) {
Iain Merrick75681382010-08-19 15:07:18 +01002364 ASSERT(!elements.is(ip));
2365 Label ok;
2366 push(elements);
2367 ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2368 LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
2369 cmp(elements, ip);
2370 b(eq, &ok);
2371 LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
2372 cmp(elements, ip);
2373 b(eq, &ok);
2374 Abort("JSObject with fast elements map has slow elements");
2375 bind(&ok);
2376 pop(elements);
2377 }
2378}
2379
2380
Steve Block1e0659c2011-05-24 12:43:12 +01002381void MacroAssembler::Check(Condition cond, const char* msg) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002382 Label L;
Steve Block1e0659c2011-05-24 12:43:12 +01002383 b(cond, &L);
Steve Blocka7e24c12009-10-30 11:49:00 +00002384 Abort(msg);
2385 // will not return here
2386 bind(&L);
2387}
2388
2389
2390void MacroAssembler::Abort(const char* msg) {
Steve Block8defd9f2010-07-08 12:39:36 +01002391 Label abort_start;
2392 bind(&abort_start);
Steve Blocka7e24c12009-10-30 11:49:00 +00002393 // We want to pass the msg string like a smi to avoid GC
2394 // problems, however msg is not guaranteed to be aligned
2395 // properly. Instead, we pass an aligned pointer that is
2396 // a proper v8 smi, but also pass the alignment difference
2397 // from the real pointer as a smi.
2398 intptr_t p1 = reinterpret_cast<intptr_t>(msg);
2399 intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
2400 ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
2401#ifdef DEBUG
2402 if (msg != NULL) {
2403 RecordComment("Abort message: ");
2404 RecordComment(msg);
2405 }
2406#endif
Steve Blockd0582a62009-12-15 09:54:21 +00002407 // Disable stub call restrictions to always allow calls to abort.
Ben Murdoch086aeea2011-05-13 15:57:08 +01002408 AllowStubCallsScope allow_scope(this, true);
Steve Blockd0582a62009-12-15 09:54:21 +00002409
Steve Blocka7e24c12009-10-30 11:49:00 +00002410 mov(r0, Operand(p0));
2411 push(r0);
2412 mov(r0, Operand(Smi::FromInt(p1 - p0)));
2413 push(r0);
2414 CallRuntime(Runtime::kAbort, 2);
2415 // will not return here
Steve Block8defd9f2010-07-08 12:39:36 +01002416 if (is_const_pool_blocked()) {
2417 // If the calling code cares about the exact number of
2418 // instructions generated, we insert padding here to keep the size
2419 // of the Abort macro constant.
2420 static const int kExpectedAbortInstructions = 10;
2421 int abort_instructions = InstructionsGeneratedSince(&abort_start);
2422 ASSERT(abort_instructions <= kExpectedAbortInstructions);
2423 while (abort_instructions++ < kExpectedAbortInstructions) {
2424 nop();
2425 }
2426 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002427}
2428
2429
Steve Blockd0582a62009-12-15 09:54:21 +00002430void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2431 if (context_chain_length > 0) {
2432 // Move up the chain of contexts to the context containing the slot.
2433 ldr(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX)));
2434 // Load the function context (which is the incoming, outer context).
2435 ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
2436 for (int i = 1; i < context_chain_length; i++) {
2437 ldr(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
2438 ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
2439 }
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002440 } else {
2441 // Slot is in the current function context. Move it into the
2442 // destination register in case we store into it (the write barrier
2443 // cannot be allowed to destroy the context in esi).
2444 mov(dst, cp);
2445 }
2446
2447 // We should not have found a 'with' context by walking the context chain
2448 // (i.e., the static scope chain and runtime context chain do not agree).
2449 // A variable occurring in such a scope should have slot type LOOKUP and
2450 // not CONTEXT.
Steve Block44f0eee2011-05-26 01:26:41 +01002451 if (emit_debug_code()) {
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002452 ldr(ip, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
2453 cmp(dst, ip);
2454 Check(eq, "Yo dawg, I heard you liked function contexts "
2455 "so I put function contexts in all your contexts");
Steve Blockd0582a62009-12-15 09:54:21 +00002456 }
2457}
2458
2459
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002460void MacroAssembler::LoadGlobalFunction(int index, Register function) {
2461 // Load the global or builtins object from the current context.
2462 ldr(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
2463 // Load the global context from the global or builtins object.
2464 ldr(function, FieldMemOperand(function,
2465 GlobalObject::kGlobalContextOffset));
2466 // Load the function from the global context.
2467 ldr(function, MemOperand(function, Context::SlotOffset(index)));
2468}
2469
2470
2471void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2472 Register map,
2473 Register scratch) {
2474 // Load the initial map. The global functions all have initial maps.
2475 ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
Steve Block44f0eee2011-05-26 01:26:41 +01002476 if (emit_debug_code()) {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002477 Label ok, fail;
2478 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, false);
2479 b(&ok);
2480 bind(&fail);
2481 Abort("Global functions must have initial map");
2482 bind(&ok);
2483 }
2484}
2485
2486
Steve Block1e0659c2011-05-24 12:43:12 +01002487void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
2488 Register reg,
2489 Register scratch,
2490 Label* not_power_of_two_or_zero) {
2491 sub(scratch, reg, Operand(1), SetCC);
2492 b(mi, not_power_of_two_or_zero);
2493 tst(scratch, reg);
2494 b(ne, not_power_of_two_or_zero);
2495}
2496
2497
Steve Block44f0eee2011-05-26 01:26:41 +01002498void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
2499 Register reg,
2500 Register scratch,
2501 Label* zero_and_neg,
2502 Label* not_power_of_two) {
2503 sub(scratch, reg, Operand(1), SetCC);
2504 b(mi, zero_and_neg);
2505 tst(scratch, reg);
2506 b(ne, not_power_of_two);
2507}
2508
2509
Andrei Popescu31002712010-02-23 13:46:05 +00002510void MacroAssembler::JumpIfNotBothSmi(Register reg1,
2511 Register reg2,
2512 Label* on_not_both_smi) {
Steve Block1e0659c2011-05-24 12:43:12 +01002513 STATIC_ASSERT(kSmiTag == 0);
Andrei Popescu31002712010-02-23 13:46:05 +00002514 tst(reg1, Operand(kSmiTagMask));
2515 tst(reg2, Operand(kSmiTagMask), eq);
2516 b(ne, on_not_both_smi);
2517}
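// How the conditional tst pairs up: the first tst sets Z iff reg1 has a
// clear smi tag; the second executes only under eq, so after both, Z is set
// iff reg1 and reg2 are both smis, and the caller's b(ne) catches either
// non-smi without needing a scratch register.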
2518
2519
2520void MacroAssembler::JumpIfEitherSmi(Register reg1,
2521 Register reg2,
2522 Label* on_either_smi) {
Steve Block1e0659c2011-05-24 12:43:12 +01002523 STATIC_ASSERT(kSmiTag == 0);
Andrei Popescu31002712010-02-23 13:46:05 +00002524 tst(reg1, Operand(kSmiTagMask));
2525 tst(reg2, Operand(kSmiTagMask), ne);
2526 b(eq, on_either_smi);
2527}
2528
2529
Iain Merrick75681382010-08-19 15:07:18 +01002530void MacroAssembler::AbortIfSmi(Register object) {
Steve Block1e0659c2011-05-24 12:43:12 +01002531 STATIC_ASSERT(kSmiTag == 0);
Iain Merrick75681382010-08-19 15:07:18 +01002532 tst(object, Operand(kSmiTagMask));
2533 Assert(ne, "Operand is a smi");
2534}
2535
2536
Steve Block1e0659c2011-05-24 12:43:12 +01002537void MacroAssembler::AbortIfNotSmi(Register object) {
2538 STATIC_ASSERT(kSmiTag == 0);
2539 tst(object, Operand(kSmiTagMask));
2540 Assert(eq, "Operand is not smi");
2541}
2542
2543
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002544void MacroAssembler::AbortIfNotString(Register object) {
2545 STATIC_ASSERT(kSmiTag == 0);
2546 tst(object, Operand(kSmiTagMask));
2547 Assert(ne, "Operand is not a string");
2548 push(object);
2549 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
2550 CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
2551 pop(object);
2552 Assert(lo, "Operand is not a string");
2553}
2554
2555
Steve Block1e0659c2011-05-24 12:43:12 +01002557void MacroAssembler::AbortIfNotRootValue(Register src,
2558 Heap::RootListIndex root_value_index,
2559 const char* message) {
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002560 CompareRoot(src, root_value_index);
Steve Block1e0659c2011-05-24 12:43:12 +01002561 Assert(eq, message);
2562}
2563
2564
2565void MacroAssembler::JumpIfNotHeapNumber(Register object,
2566 Register heap_number_map,
2567 Register scratch,
2568 Label* on_not_heap_number) {
2569 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2570 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2571 cmp(scratch, heap_number_map);
2572 b(ne, on_not_heap_number);
2573}
2574
2575
Leon Clarked91b9f72010-01-27 17:25:45 +00002576void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
2577 Register first,
2578 Register second,
2579 Register scratch1,
2580 Register scratch2,
2581 Label* failure) {
2582 // Test that both first and second are sequential ASCII strings.
2583 // Assume that they are non-smis.
2584 ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
2585 ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
2586 ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
2587 ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
Steve Block6ded16b2010-05-10 14:33:55 +01002588
2589 JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
2590 scratch2,
2591 scratch1,
2592 scratch2,
2593 failure);
Leon Clarked91b9f72010-01-27 17:25:45 +00002594}
2595
2596void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
2597 Register second,
2598 Register scratch1,
2599 Register scratch2,
2600 Label* failure) {
2601 // Check that neither is a smi.
Steve Block1e0659c2011-05-24 12:43:12 +01002602 STATIC_ASSERT(kSmiTag == 0);
Leon Clarked91b9f72010-01-27 17:25:45 +00002603 and_(scratch1, first, Operand(second));
2604 tst(scratch1, Operand(kSmiTagMask));
2605 b(eq, failure);
2606 JumpIfNonSmisNotBothSequentialAsciiStrings(first,
2607 second,
2608 scratch1,
2609 scratch2,
2610 failure);
2611}
2612
Steve Blockd0582a62009-12-15 09:54:21 +00002613
Steve Block6ded16b2010-05-10 14:33:55 +01002614// Allocates a heap number or jumps to the gc_required label if the young
2615// space is full and a scavenge is needed.
2616void MacroAssembler::AllocateHeapNumber(Register result,
2617 Register scratch1,
2618 Register scratch2,
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002619 Register heap_number_map,
Steve Block6ded16b2010-05-10 14:33:55 +01002620 Label* gc_required) {
2621 // Allocate an object in the heap for the heap number and tag it as a heap
2622 // object.
Kristian Monsen25f61362010-05-21 11:50:48 +01002623 AllocateInNewSpace(HeapNumber::kSize,
Steve Block6ded16b2010-05-10 14:33:55 +01002624 result,
2625 scratch1,
2626 scratch2,
2627 gc_required,
2628 TAG_OBJECT);
2629
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002630 // Store heap number map in the allocated object.
2631 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2632 str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
Steve Block6ded16b2010-05-10 14:33:55 +01002633}
2634
2635
Steve Block8defd9f2010-07-08 12:39:36 +01002636void MacroAssembler::AllocateHeapNumberWithValue(Register result,
2637 DwVfpRegister value,
2638 Register scratch1,
2639 Register scratch2,
2640 Register heap_number_map,
2641 Label* gc_required) {
2642 AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
2643 sub(scratch1, result, Operand(kHeapObjectTag));
2644 vstr(value, scratch1, HeapNumber::kValueOffset);
2645}
2646
2647
Ben Murdochbb769b22010-08-11 14:56:33 +01002648// Copies a fixed number of fields of heap objects from src to dst.
2649void MacroAssembler::CopyFields(Register dst,
2650 Register src,
2651 RegList temps,
2652 int field_count) {
2653 // At least one bit set in the first 15 registers.
2654 ASSERT((temps & ((1 << 15) - 1)) != 0);
2655 ASSERT((temps & dst.bit()) == 0);
2656 ASSERT((temps & src.bit()) == 0);
2657 // Primitive implementation using only one temporary register.
2658
2659 Register tmp = no_reg;
2660 // Find a temp register in temps list.
2661 for (int i = 0; i < 15; i++) {
2662 if ((temps & (1 << i)) != 0) {
2663 tmp.set_code(i);
2664 break;
2665 }
2666 }
2667 ASSERT(!tmp.is(no_reg));
2668
2669 for (int i = 0; i < field_count; i++) {
2670 ldr(tmp, FieldMemOperand(src, i * kPointerSize));
2671 str(tmp, FieldMemOperand(dst, i * kPointerSize));
2672 }
2673}
2674
2675
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002676void MacroAssembler::CopyBytes(Register src,
2677 Register dst,
2678 Register length,
2679 Register scratch) {
2680 Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
2681
2682 // Align src before copying in word size chunks.
2683 bind(&align_loop);
2684 cmp(length, Operand(0));
2685 b(eq, &done);
2686 bind(&align_loop_1);
2687 tst(src, Operand(kPointerSize - 1));
2688 b(eq, &word_loop);
2689 ldrb(scratch, MemOperand(src, 1, PostIndex));
2690 strb(scratch, MemOperand(dst, 1, PostIndex));
2691 sub(length, length, Operand(1), SetCC);
2692 b(ne, &byte_loop_1);
2693
2694 // Copy bytes in word-size chunks.
2695 bind(&word_loop);
Steve Block44f0eee2011-05-26 01:26:41 +01002696 if (emit_debug_code()) {
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002697 tst(src, Operand(kPointerSize - 1));
2698 Assert(eq, "Expecting alignment for CopyBytes");
2699 }
2700 cmp(length, Operand(kPointerSize));
2701 b(lt, &byte_loop);
2702 ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
2703#if CAN_USE_UNALIGNED_ACCESSES
2704 str(scratch, MemOperand(dst, kPointerSize, PostIndex));
2705#else
2706 strb(scratch, MemOperand(dst, 1, PostIndex));
2707 mov(scratch, Operand(scratch, LSR, 8));
2708 strb(scratch, MemOperand(dst, 1, PostIndex));
2709 mov(scratch, Operand(scratch, LSR, 8));
2710 strb(scratch, MemOperand(dst, 1, PostIndex));
2711 mov(scratch, Operand(scratch, LSR, 8));
2712 strb(scratch, MemOperand(dst, 1, PostIndex));
2713#endif
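// The #else branch above assumes little-endian byte order: a word loaded
// from the byte sequence 11 22 33 44 reads 0x44332211, and the strb/LSR-8
// chain stores 0x11, 0x22, 0x33, 0x44, reproducing the original order one
// byte at a time when unaligned word stores are unavailable.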
2714 sub(length, length, Operand(kPointerSize));
2715 b(&word_loop);
2716
2717 // Copy the last bytes if any left.
2718 bind(&byte_loop);
2719 cmp(length, Operand(0));
2720 b(eq, &done);
2721 bind(&byte_loop_1);
2722 ldrb(scratch, MemOperand(src, 1, PostIndex));
2723 strb(scratch, MemOperand(dst, 1, PostIndex));
2724 sub(length, length, Operand(1), SetCC);
2725 b(ne, &byte_loop_1);
2726 bind(&done);
2727}


void MacroAssembler::CountLeadingZeros(Register zeros,   // Answer.
                                       Register source,  // Input.
                                       Register scratch) {
  ASSERT(!zeros.is(source) || !source.is(scratch));
  ASSERT(!zeros.is(scratch));
  ASSERT(!scratch.is(ip));
  ASSERT(!source.is(ip));
  ASSERT(!zeros.is(ip));
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
  clz(zeros, source);  // This instruction is only supported on ARMv5 and later.
#else
  // The order of the next two instructions matters: zeros may alias source,
  // so source must be copied into scratch before zeros is cleared.
  Move(scratch, source);
  mov(zeros, Operand(0, RelocInfo::NONE));
  // Top 16.
  tst(scratch, Operand(0xffff0000));
  add(zeros, zeros, Operand(16), LeaveCC, eq);
  mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
  // Top 8.
  tst(scratch, Operand(0xff000000));
  add(zeros, zeros, Operand(8), LeaveCC, eq);
  mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
  // Top 4.
  tst(scratch, Operand(0xf0000000));
  add(zeros, zeros, Operand(4), LeaveCC, eq);
  mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
  // Top 2.
  tst(scratch, Operand(0xc0000000));
  add(zeros, zeros, Operand(2), LeaveCC, eq);
  mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
  // Top bit.
  tst(scratch, Operand(0x80000000u));
  add(zeros, zeros, Operand(1), LeaveCC, eq);
#endif
}
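
// C sketch of the ARMv4 fallback above (illustration only):
//   int CountLeadingZeros(uint32_t x) {
//     int n = 0;
//     if ((x & 0xffff0000u) == 0) { n += 16; x <<= 16; }
//     if ((x & 0xff000000u) == 0) { n += 8;  x <<= 8;  }
//     if ((x & 0xf0000000u) == 0) { n += 4;  x <<= 4;  }
//     if ((x & 0xc0000000u) == 0) { n += 2;  x <<= 2;  }
//     if ((x & 0x80000000u) == 0) { n += 1; }
//     return n;
//   }
// Note that the fallback yields 31 for an input of 0, whereas the clz
// instruction yields 32.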


void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
    Register first,
    Register second,
    Register scratch1,
    Register scratch2,
    Label* failure) {
  int kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
  and_(scratch1, first, Operand(kFlatAsciiStringMask));
  and_(scratch2, second, Operand(kFlatAsciiStringMask));
  cmp(scratch1, Operand(kFlatAsciiStringTag));
  // Ignore second test if first test failed.
  cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
  b(ne, failure);
}
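
// The predicated cmp above folds two checks into one branch: if the first
// cmp does not set eq, the second cmp is skipped and the flags from the
// first comparison survive, so the single b(ne, failure) fires when either
// instance type is not a sequential ASCII string.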


void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
                                                            Register scratch,
                                                            Label* failure) {
  int kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
  and_(scratch, type, Operand(kFlatAsciiStringMask));
  cmp(scratch, Operand(kFlatAsciiStringTag));
  b(ne, failure);
}
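
// Hypothetical call site: 'type' must already hold the instance type, e.g.:
//   ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
//   ldrb(type, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
//   JumpIfInstanceTypeIsNotSequentialAscii(type, scratch, &slow_case);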


static const int kRegisterPassedArguments = 4;

void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
  int frame_alignment = ActivationFrameAlignment();

  // Up to four simple arguments are passed in registers r0..r3.
  int stack_passed_arguments = (num_arguments <= kRegisterPassedArguments) ?
      0 : num_arguments - kRegisterPassedArguments;
  if (frame_alignment > kPointerSize) {
    // Make the stack end at the alignment boundary and make room for the
    // stack-passed arguments and the original value of sp.
    mov(scratch, sp);
    sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
    ASSERT(IsPowerOf2(frame_alignment));
    and_(sp, sp, Operand(-frame_alignment));
    str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}
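
// Typical pairing (hypothetical example): calling int f(int, int, int, int,
// int), which passes its fifth argument on the stack:
//   PrepareCallCFunction(5, scratch);
//   // ... move the first four arguments into r0-r3 ...
//   str(r4, MemOperand(sp, 0 * kPointerSize));  // fifth argument
//   CallCFunction(ExternalReference(...), 5);
// CallCFunction undoes the stack adjustment once the call returns.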


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  CallCFunctionHelper(no_reg, function, ip, num_arguments);
}


void MacroAssembler::CallCFunction(Register function,
                                   Register scratch,
                                   int num_arguments) {
  CallCFunctionHelper(function,
                      ExternalReference::the_hole_value_location(isolate()),
                      scratch,
                      num_arguments);
}


void MacroAssembler::CallCFunctionHelper(Register function,
                                         ExternalReference function_reference,
                                         Register scratch,
                                         int num_arguments) {
  // Make sure that the stack is aligned before calling a C function unless
  // running in the simulator. The simulator has its own alignment check which
  // provides more information.
#if defined(V8_HOST_ARCH_ARM)
  if (emit_debug_code()) {
    int frame_alignment = OS::ActivationFrameAlignment();
    int frame_alignment_mask = frame_alignment - 1;
    if (frame_alignment > kPointerSize) {
      ASSERT(IsPowerOf2(frame_alignment));
      Label alignment_as_expected;
      tst(sp, Operand(frame_alignment_mask));
      b(eq, &alignment_as_expected);
      // Don't use Check here, as it will call Runtime_Abort, possibly
      // re-entering here.
      stop("Unexpected alignment");
      bind(&alignment_as_expected);
    }
  }
#endif

  // Just call directly. The function called cannot cause a GC, or
  // allow preemption, so the return address in the link register
  // stays correct.
  if (function.is(no_reg)) {
    mov(scratch, Operand(function_reference));
    function = scratch;
  }
  Call(function);
  int stack_passed_arguments = (num_arguments <= kRegisterPassedArguments) ?
      0 : num_arguments - kRegisterPassedArguments;
  if (OS::ActivationFrameAlignment() > kPointerSize) {
    ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    add(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}
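
// Note on the epilogue above: when the frame was over-aligned, the caller's
// original sp was stashed at sp[stack_passed_arguments] by
// PrepareCallCFunction, so a single ldr restores it exactly; otherwise a
// plain add pops the argument slots.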


void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
                                               Register result) {
  const uint32_t kLdrOffsetMask = (1 << 12) - 1;
  const int32_t kPCRegOffset = 2 * kPointerSize;
  ldr(result, MemOperand(ldr_location));
  if (emit_debug_code()) {
    // Check that the instruction is a ldr reg, [pc + offset].
    and_(result, result, Operand(kLdrPCPattern));
    cmp(result, Operand(kLdrPCPattern));
    Check(eq, "The instruction to patch should be a load from pc.");
    // Result was clobbered. Restore it.
    ldr(result, MemOperand(ldr_location));
  }
  // Get the address of the constant.
  and_(result, result, Operand(kLdrOffsetMask));
  add(result, ldr_location, Operand(result));
  add(result, result, Operand(kPCRegOffset));
}
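
// Worked example (illustration only): for the instruction
//   ldr r0, [pc, #8]
// at address A, the ARM pipeline makes pc read as A + 8, so the constant
// lives at A + 8 + 8. kPCRegOffset (2 * kPointerSize) models the pipeline
// offset, and kLdrOffsetMask extracts the 12-bit immediate from the
// instruction word.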


CodePatcher::CodePatcher(byte* address, int instructions)
    : address_(address),
      instructions_(instructions),
      size_(instructions * Assembler::kInstrSize),
      masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted by kGap in order for the assembler to generate size
  // bytes of instructions without failing with buffer size constraints.
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  CPU::FlushICache(address_, size_);

  // Check that the code was patched as expected.
  ASSERT(masm_.pc_ == address_ + size_);
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


void CodePatcher::Emit(Instr instr) {
  masm()->emit(instr);
}


void CodePatcher::Emit(Address addr) {
  masm()->emit(reinterpret_cast<Instr>(addr));
}


void CodePatcher::EmitCondition(Condition cond) {
  Instr instr = Assembler::instr_at(masm_.pc_);
  instr = (instr & ~kCondMask) | cond;
  masm_.emit(instr);
}
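
// Hypothetical usage: flip the condition of the single instruction at
// 'address' (for example, turning a beq into a bne); the destructor then
// flushes the instruction cache for the patched range:
//   {
//     CodePatcher patcher(address, 1);
//     patcher.EmitCondition(ne);
//   }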


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM