// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "bootstrapper.h"
#include "codegen-inl.h"
#include "debug.h"
#include "runtime.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(void* buffer, int size)
    : Assembler(buffer, size),
      unresolved_(0),
      generating_stub_(false),
      allow_stub_calls_(true),
      code_object_(Heap::undefined_value()) {
}


// We always generate arm code, never thumb code, even if V8 is compiled to
// thumb, so we require inter-working support
#if defined(__thumb__) && !defined(USE_THUMB_INTERWORK)
#error "flag -mthumb-interwork missing"
#endif


// We do not support thumb inter-working with an arm architecture not supporting
// the blx instruction (below v5t). If you know what CPU you are compiling for
// you can use -march=armv7 or similar.
#if defined(USE_THUMB_INTERWORK) && !defined(CAN_USE_THUMB_INSTRUCTIONS)
# error "For thumb inter-working we require an architecture which supports blx"
#endif


// Using blx may yield better code, so use it when required or when available
#if defined(USE_THUMB_INTERWORK) || defined(CAN_USE_ARMV5_INSTRUCTIONS)
#define USE_BLX 1
#endif

// Using bx does not yield better code, so use it only when required
#if defined(USE_THUMB_INTERWORK)
#define USE_BX 1
#endif


void MacroAssembler::Jump(Register target, Condition cond) {
#if USE_BX
  bx(target, cond);
#else
  mov(pc, Operand(target), LeaveCC, cond);
#endif
}


void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond) {
#if USE_BX
  mov(ip, Operand(target, rmode), LeaveCC, cond);
  bx(ip, cond);
#else
  mov(pc, Operand(target, rmode), LeaveCC, cond);
#endif
}


void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode,
                          Condition cond) {
  ASSERT(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
}


void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  // 'code' is always generated ARM code, never THUMB code
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}


void MacroAssembler::Call(Register target, Condition cond) {
#if USE_BLX
  blx(target, cond);
#else
  // set lr for return at current pc + 8
  mov(lr, Operand(pc), LeaveCC, cond);
  mov(pc, Operand(target), LeaveCC, cond);
#endif
}


void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond) {
  // Set lr for return at current pc + 8.
  mov(lr, Operand(pc), LeaveCC, cond);
  // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
  mov(pc, Operand(target, rmode), LeaveCC, cond);
  // If USE_BLX is defined, we could emit a 'mov ip, target', followed by a
  // 'blx ip'; however, the code would not be shorter than the above sequence
  // and the target address of the call would be referenced by the first
  // instruction rather than the second one, which would make it harder to
  // patch (two instructions before the return address, instead of one).
  ASSERT(kCallTargetAddressOffset == kInstrSize);
}


void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode,
                          Condition cond) {
  ASSERT(!RelocInfo::IsCodeTarget(rmode));
  Call(reinterpret_cast<intptr_t>(target), rmode, cond);
}


void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  // 'code' is always generated ARM code, never THUMB code
  Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}


void MacroAssembler::Ret(Condition cond) {
#if USE_BX
  bx(lr, cond);
#else
  mov(pc, Operand(lr), LeaveCC, cond);
#endif
}


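// Branches to on_stack_overflow if the stack pointer lies below the stack
// limit, i.e. if a stack overflow is imminent. Clobbers ip.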
void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
  LoadRoot(ip, Heap::kStackLimitRootIndex);
  cmp(sp, Operand(ip));
  b(lo, on_stack_overflow);
}


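// Note: 'index' is expected to hold a smi. Shifting it left by
// kInstrSizeLog2 - kSmiTagSize turns the smi value into a byte offset into
// the branch table: reading pc in the add below yields the address of the
// first branch (pc + 8 skips the add itself and the alignment nop), so the
// add dispatches directly to the index'th branch.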
void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
  // Empty the const pool.
  CheckConstPool(true, true);
  add(pc, pc, Operand(index,
                      LSL,
                      assembler::arm::Instr::kInstrSizeLog2 - kSmiTagSize));
  BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize);
  nop();  // Jump table alignment.
  for (int i = 0; i < targets.length(); i++) {
    b(targets[i]);
  }
}


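// Loads the root at 'index' relative to r10, which is assumed to be reserved
// as the roots-array pointer by the rest of the ARM code generator.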
void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Condition cond) {
  ldr(destination, MemOperand(r10, index << kPointerSizeLog2), cond);
}


// Will clobber 4 registers: object, offset, scratch, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object, Register offset,
                                 Register scratch) {
  // This is how much we shift the remembered set bit offset to get the
  // offset of the word in the remembered set. We divide by kBitsPerInt (32,
  // shift right 5) and then multiply by kIntSize (4, shift left 2).
  const int kRSetWordShift = 3;

  Label fast, done;

  // First, test that the object is not in the new space. We cannot set
  // remembered set bits in the new space.
  // object: heap object pointer (with tag)
  // offset: offset to store location from the object
  and_(scratch, object, Operand(Heap::NewSpaceMask()));
  cmp(scratch, Operand(ExternalReference::new_space_start()));
  b(eq, &done);

  // Compute the bit offset in the remembered set.
  // object: heap object pointer (with tag)
  // offset: offset to store location from the object
  mov(ip, Operand(Page::kPageAlignmentMask));  // load mask only once
  and_(scratch, object, Operand(ip));  // offset into page of the object
  add(offset, scratch, Operand(offset));  // add offset into the object
  mov(offset, Operand(offset, LSR, kObjectAlignmentBits));

  // Compute the page address from the heap object pointer.
  // object: heap object pointer (with tag)
  // offset: bit offset of store position in the remembered set
  bic(object, object, Operand(ip));

  // If the bit offset lies beyond the normal remembered set range, it is in
  // the extra remembered set area of a large object.
  // object: page start
  // offset: bit offset of store position in the remembered set
  cmp(offset, Operand(Page::kPageSize / kPointerSize));
  b(lt, &fast);

  // Adjust the bit offset to be relative to the start of the extra
  // remembered set and the start address to be the address of the extra
  // remembered set.
  sub(offset, offset, Operand(Page::kPageSize / kPointerSize));
  // Load the array length into 'scratch' and multiply by four to get the
  // size in bytes of the elements.
  ldr(scratch, MemOperand(object, Page::kObjectStartOffset
                                  + FixedArray::kLengthOffset));
  mov(scratch, Operand(scratch, LSL, kObjectAlignmentBits));
  // Add the page header (including remembered set), array header, and array
  // body size to the page address.
  add(object, object, Operand(Page::kObjectStartOffset
                              + FixedArray::kHeaderSize));
  add(object, object, Operand(scratch));

  bind(&fast);
  // Get address of the rset word.
  // object: start of the remembered set (page start for the fast case)
  // offset: bit offset of store position in the remembered set
  bic(scratch, offset, Operand(kBitsPerInt - 1));  // clear the bit offset
  add(object, object, Operand(scratch, LSR, kRSetWordShift));
  // Get bit offset in the rset word.
  // object: address of remembered set word
  // offset: bit offset of store position
  and_(offset, offset, Operand(kBitsPerInt - 1));

  ldr(scratch, MemOperand(object));
  mov(ip, Operand(1));
  orr(scratch, scratch, Operand(ip, LSL, offset));
  str(scratch, MemOperand(object));

  bind(&done);
}


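// The resulting frame layout, from higher to lower addresses, is:
//   [lr] [caller fp] [cp] [frame type (as a smi)] [code object] <- sp
// with fp left pointing at the slot that holds the caller's fp.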
void MacroAssembler::EnterFrame(StackFrame::Type type) {
  // r0-r3: preserved
  stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
  mov(ip, Operand(Smi::FromInt(type)));
  push(ip);
  mov(ip, Operand(CodeObject()));
  push(ip);
  add(fp, sp, Operand(3 * kPointerSize));  // Adjust FP to point to saved FP.
}


void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  // r0: preserved
  // r1: preserved
  // r2: preserved

  // Drop the execution stack down to the frame pointer and restore
  // the caller frame pointer and return address.
  mov(sp, fp);
  ldm(ia_w, sp, fp.bit() | lr.bit());
}


void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
  // Compute the argv pointer and keep it in a callee-saved register.
  // r0 is argc.
  add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
  sub(r6, r6, Operand(kPointerSize));

  // Compute callee's stack pointer before making changes and save it as
  // ip register so that it is restored as sp register on exit, thereby
  // popping the args.

  // ip = sp + kPointerSize * #args;
  add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));

  // Align the stack at this point. After this point we have 5 pushes,
  // so in fact we have to unalign here! See also the assert on the
  // alignment in AlignStack.
  AlignStack(1);

  // Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
  stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
  mov(fp, Operand(sp));  // Set up new frame pointer.

  if (mode == ExitFrame::MODE_DEBUG) {
    mov(ip, Operand(Smi::FromInt(0)));
  } else {
    mov(ip, Operand(CodeObject()));
  }
  push(ip);

  // Save the frame pointer and the context in top.
  mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
  str(fp, MemOperand(ip));
  mov(ip, Operand(ExternalReference(Top::k_context_address)));
  str(cp, MemOperand(ip));

  // Set up argc and the builtin function in callee-saved registers.
  mov(r4, Operand(r0));
  mov(r5, Operand(r1));


#ifdef ENABLE_DEBUGGER_SUPPORT
  // Save the state of all registers to the stack from the memory
  // location. This is needed to allow nested break points.
  if (mode == ExitFrame::MODE_DEBUG) {
    // Use sp as base to push.
    CopyRegistersFromMemoryToStack(sp, kJSCallerSaved);
  }
#endif
}


void MacroAssembler::AlignStack(int offset) {
#if defined(V8_HOST_ARCH_ARM)
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one ARM
  // platform for another ARM platform with a different alignment.
  int activation_frame_alignment = OS::ActivationFrameAlignment();
#else  // defined(V8_HOST_ARCH_ARM)
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so we will always align at
  // this point here.
  int activation_frame_alignment = 2 * kPointerSize;
#endif  // defined(V8_HOST_ARCH_ARM)
  if (activation_frame_alignment != kPointerSize) {
    // This code needs to be made more general if this assert doesn't hold.
    ASSERT(activation_frame_alignment == 2 * kPointerSize);
    mov(r7, Operand(Smi::FromInt(0)));
    tst(sp, Operand(activation_frame_alignment - offset));
    push(r7, eq);  // Conditional push instruction.
  }
}


void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
#ifdef ENABLE_DEBUGGER_SUPPORT
  // Restore the memory copy of the registers by digging them out from
  // the stack. This is needed to allow nested break points.
  if (mode == ExitFrame::MODE_DEBUG) {
    // This code intentionally clobbers r2 and r3.
    const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
    const int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
    add(r3, fp, Operand(kOffset));
    CopyRegistersFromStackToMemory(r3, r2, kJSCallerSaved);
  }
#endif

  // Clear top frame.
  mov(r3, Operand(0));
  mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
  str(r3, MemOperand(ip));

  // Restore current context from top and clear it in debug mode.
  mov(ip, Operand(ExternalReference(Top::k_context_address)));
  ldr(cp, MemOperand(ip));
#ifdef DEBUG
  str(r3, MemOperand(ip));
#endif

  // Pop the arguments, restore registers, and return.
  mov(sp, Operand(fp));  // respect ABI stack constraint
  ldm(ia, sp, fp.bit() | sp.bit() | pc.bit());
}


void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    Register code_reg,
                                    Label* done,
                                    InvokeFlag flag) {
  bool definitely_matches = false;
  Label regular_invoke;

  // Check whether the expected and actual arguments count match. If not,
  // set up registers according to contract with ArgumentsAdaptorTrampoline:
  // r0: actual arguments count
  // r1: function (passed through to callee)
  // r2: expected arguments count
  // r3: callee code entry

  // The code below is made a lot easier because the calling code already sets
  // up actual and expected registers according to the contract if values are
  // passed in registers.
  ASSERT(actual.is_immediate() || actual.reg().is(r0));
  ASSERT(expected.is_immediate() || expected.reg().is(r2));
  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));

  if (expected.is_immediate()) {
    ASSERT(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      mov(r0, Operand(actual.immediate()));
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaptation code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        mov(r2, Operand(expected.immediate()));
      }
    }
  } else {
    if (actual.is_immediate()) {
      cmp(expected.reg(), Operand(actual.immediate()));
      b(eq, &regular_invoke);
      mov(r0, Operand(actual.immediate()));
    } else {
      cmp(expected.reg(), Operand(actual.reg()));
      b(eq, &regular_invoke);
    }
  }

  if (!definitely_matches) {
    if (!code_constant.is_null()) {
      mov(r3, Operand(code_constant));
      add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
    }

    Handle<Code> adaptor =
        Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
    if (flag == CALL_FUNCTION) {
      Call(adaptor, RelocInfo::CODE_TARGET);
      b(done);
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&regular_invoke);
  }
}


void MacroAssembler::InvokeCode(Register code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                InvokeFlag flag) {
  Label done;

  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
  if (flag == CALL_FUNCTION) {
    Call(code);
  } else {
    ASSERT(flag == JUMP_FUNCTION);
    Jump(code);
  }

  // Continue here if InvokePrologue handled the invocation due to
  // mismatched parameter counts.
  bind(&done);
}


void MacroAssembler::InvokeCode(Handle<Code> code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                RelocInfo::Mode rmode,
                                InvokeFlag flag) {
  Label done;

  InvokePrologue(expected, actual, code, no_reg, &done, flag);
  if (flag == CALL_FUNCTION) {
    Call(code, rmode);
  } else {
    Jump(code, rmode);
  }

  // Continue here if InvokePrologue handled the invocation due to
  // mismatched parameter counts.
  bind(&done);
}


void MacroAssembler::InvokeFunction(Register fun,
                                    const ParameterCount& actual,
                                    InvokeFlag flag) {
  // Contract with called JS functions requires that function is passed in r1.
  ASSERT(fun.is(r1));

  Register expected_reg = r2;
  Register code_reg = r3;

  ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
  ldr(expected_reg,
      FieldMemOperand(code_reg,
                      SharedFunctionInfo::kFormalParameterCountOffset));
  ldr(code_reg,
      MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
  add(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag));

  ParameterCount expected(expected_reg);
  InvokeCode(code_reg, expected, actual, flag);
}


#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::SaveRegistersToMemory(RegList regs) {
  ASSERT((regs & ~kJSCallerSaved) == 0);
  // Copy the content of registers to memory location.
  for (int i = 0; i < kNumJSCallerSaved; i++) {
    int r = JSCallerSavedCode(i);
    if ((regs & (1 << r)) != 0) {
      Register reg = { r };
      mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
      str(reg, MemOperand(ip));
    }
  }
}


void MacroAssembler::RestoreRegistersFromMemory(RegList regs) {
  ASSERT((regs & ~kJSCallerSaved) == 0);
  // Copy the content of memory location to registers.
  for (int i = kNumJSCallerSaved; --i >= 0;) {
    int r = JSCallerSavedCode(i);
    if ((regs & (1 << r)) != 0) {
      Register reg = { r };
      mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
      ldr(reg, MemOperand(ip));
    }
  }
}


void MacroAssembler::CopyRegistersFromMemoryToStack(Register base,
                                                    RegList regs) {
  ASSERT((regs & ~kJSCallerSaved) == 0);
  // Copy the content of the memory location to the stack and adjust base.
  for (int i = kNumJSCallerSaved; --i >= 0;) {
    int r = JSCallerSavedCode(i);
    if ((regs & (1 << r)) != 0) {
      mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
      ldr(ip, MemOperand(ip));
      str(ip, MemOperand(base, 4, NegPreIndex));
    }
  }
}


void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
                                                    Register scratch,
                                                    RegList regs) {
  ASSERT((regs & ~kJSCallerSaved) == 0);
  // Copy the content of the stack to the memory location and adjust base.
  for (int i = 0; i < kNumJSCallerSaved; i++) {
    int r = JSCallerSavedCode(i);
    if ((regs & (1 << r)) != 0) {
      mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
      ldr(scratch, MemOperand(base, 4, PostIndex));
      str(scratch, MemOperand(ip));
    }
  }
}
#endif


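// A stack handler occupies four words; from lowest to highest address they
// are: next handler, state, fp and pc (see the StackHandlerConstants asserts
// below). The new handler's address is stored at Top::k_handler_address,
// linking it in as the head of the handler chain.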
void MacroAssembler::PushTryHandler(CodeLocation try_location,
                                    HandlerType type) {
  // Adjust this code if not the case.
  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
  // The pc (return address) is passed in register lr.
  if (try_location == IN_JAVASCRIPT) {
    if (type == TRY_CATCH_HANDLER) {
      mov(r3, Operand(StackHandler::TRY_CATCH));
    } else {
      mov(r3, Operand(StackHandler::TRY_FINALLY));
    }
    ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
           && StackHandlerConstants::kFPOffset == 2 * kPointerSize
           && StackHandlerConstants::kPCOffset == 3 * kPointerSize);
    stm(db_w, sp, r3.bit() | fp.bit() | lr.bit());
    // Save the current handler as the next handler.
    mov(r3, Operand(ExternalReference(Top::k_handler_address)));
    ldr(r1, MemOperand(r3));
    ASSERT(StackHandlerConstants::kNextOffset == 0);
    push(r1);
    // Link this handler as the new current one.
    str(sp, MemOperand(r3));
  } else {
    // Must preserve r0-r4, r5-r7 are available.
    ASSERT(try_location == IN_JS_ENTRY);
    // The frame pointer does not point to a JS frame so we save NULL
    // for fp. We expect the code throwing an exception to check fp
    // before dereferencing it to restore the context.
    mov(ip, Operand(0));  // To save a NULL frame pointer.
    mov(r6, Operand(StackHandler::ENTRY));
    ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
           && StackHandlerConstants::kFPOffset == 2 * kPointerSize
           && StackHandlerConstants::kPCOffset == 3 * kPointerSize);
    stm(db_w, sp, r6.bit() | ip.bit() | lr.bit());
    // Save the current handler as the next handler.
    mov(r7, Operand(ExternalReference(Top::k_handler_address)));
    ldr(r6, MemOperand(r7));
    ASSERT(StackHandlerConstants::kNextOffset == 0);
    push(r6);
    // Link this handler as the new current one.
    str(sp, MemOperand(r7));
  }
}


Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
                                   JSObject* holder, Register holder_reg,
                                   Register scratch,
                                   Label* miss) {
  // Make sure there's no overlap between scratch and the other
  // registers.
  ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));

  // Keep track of the current object in register reg.
  Register reg = object_reg;
  int depth = 1;

  // Check the maps in the prototype chain.
  // Traverse the prototype chain from the object and do map checks.
  while (object != holder) {
    depth++;

    // Only global objects and objects that do not require access
    // checks are allowed in stubs.
    ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());

    // Get the map of the current object.
    ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
    cmp(scratch, Operand(Handle<Map>(object->map())));

    // Branch on the result of the map check.
    b(ne, miss);

    // Check access rights to the global object. This has to happen
    // after the map check so that we know that the object is
    // actually a global object.
    if (object->IsJSGlobalProxy()) {
      CheckAccessGlobalProxy(reg, scratch, miss);
      // Restore scratch register to be the map of the object. In the
      // new space case below, we load the prototype from the map in
      // the scratch register.
      ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
    }

    reg = holder_reg;  // from now the object is in holder_reg
    JSObject* prototype = JSObject::cast(object->GetPrototype());
    if (Heap::InNewSpace(prototype)) {
      // The prototype is in new space; we cannot store a reference
      // to it in the code. Load it from the map.
      ldr(reg, FieldMemOperand(scratch, Map::kPrototypeOffset));
    } else {
      // The prototype is in old space; load it directly.
      mov(reg, Operand(Handle<JSObject>(prototype)));
    }

    // Go to the next object in the prototype chain.
    object = prototype;
  }

  // Check the holder map.
  ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
  cmp(scratch, Operand(Handle<Map>(object->map())));
  b(ne, miss);

  // Log the check depth.
  LOG(IntEvent("check-maps-depth", depth));

  // Perform security check for access to the global object and return
  // the holder register.
  ASSERT(object == holder);
  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
  if (object->IsJSGlobalProxy()) {
    CheckAccessGlobalProxy(reg, scratch, miss);
  }
  return reg;
}


void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch,
                                            Label* miss) {
  Label same_contexts;

  ASSERT(!holder_reg.is(scratch));
  ASSERT(!holder_reg.is(ip));
  ASSERT(!scratch.is(ip));

  // Load current lexical context from the stack frame.
  ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
  // In debug mode, make sure the lexical context is set.
#ifdef DEBUG
  cmp(scratch, Operand(0));
  Check(ne, "we should not have an empty lexical context");
#endif

  // Load the global context of the current context.
  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
  ldr(scratch, FieldMemOperand(scratch, offset));
  ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));

  // Check the context is a global context.
  if (FLAG_debug_code) {
    // TODO(119): avoid push(holder_reg)/pop(holder_reg)
    // Cannot use ip as a temporary in this verification code because ip is
    // clobbered as part of cmp with an object Operand.
    push(holder_reg);  // Temporarily save holder on the stack.
    // Read the first word and compare to the global_context_map.
    ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
    LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
    cmp(holder_reg, ip);
    Check(eq, "JSGlobalObject::global_context should be a global context.");
    pop(holder_reg);  // Restore holder.
  }

  // Check if both contexts are the same.
  ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
  cmp(scratch, Operand(ip));
  b(eq, &same_contexts);

  // Check the context is a global context.
  if (FLAG_debug_code) {
    // TODO(119): avoid push(holder_reg)/pop(holder_reg)
    // Cannot use ip as a temporary in this verification code because ip is
    // clobbered as part of cmp with an object Operand.
    push(holder_reg);  // Temporarily save holder on the stack.
    mov(holder_reg, ip);  // Move ip to its holding place.
    LoadRoot(ip, Heap::kNullValueRootIndex);
    cmp(holder_reg, ip);
    Check(ne, "JSGlobalProxy::context() should not be null.");

    ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
    LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
    cmp(holder_reg, ip);
    Check(eq, "JSGlobalObject::global_context should be a global context.");
    // Restoring ip is not needed; ip is reloaded below.
    pop(holder_reg);  // Restore holder.
    // Restore ip to holder's context.
    ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
  }

  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.
  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;

  ldr(scratch, FieldMemOperand(scratch, token_offset));
  ldr(ip, FieldMemOperand(ip, token_offset));
  cmp(scratch, Operand(ip));
  b(ne, miss);

  bind(&same_contexts);
}


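// Allocates an object of 'object_size' words in new space. On success
// 'result' holds the object's address (tagged if TAG_OBJECT is set); if new
// space is exhausted, control transfers to 'gc_required'. The allocation top
// and limit are read and updated through scratch1 and scratch2.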
void MacroAssembler::AllocateInNewSpace(int object_size,
                                        Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Label* gc_required,
                                        AllocationFlags flags) {
  ASSERT(!result.is(scratch1));
  ASSERT(!scratch1.is(scratch2));

  // Load address of new object into result and allocation top address into
  // scratch1.
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address();
  mov(scratch1, Operand(new_space_allocation_top));
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    ldr(result, MemOperand(scratch1));
  } else if (FLAG_debug_code) {
    // Assert that result actually contains top on entry. scratch2 is used
    // immediately below so this use of scratch2 does not cause difference with
    // respect to register content between debug and release mode.
    ldr(scratch2, MemOperand(scratch1));
    cmp(result, scratch2);
    Check(eq, "Unexpected allocation top");
  }

  // Calculate new top and bail out if new space is exhausted. Use result
  // to calculate the new top.
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address();
  mov(scratch2, Operand(new_space_allocation_limit));
  ldr(scratch2, MemOperand(scratch2));
  add(result, result, Operand(object_size * kPointerSize));
  cmp(result, Operand(scratch2));
  b(hi, gc_required);

  // Update allocation top. result temporarily holds the new top.
  if (FLAG_debug_code) {
    tst(result, Operand(kObjectAlignmentMask));
    Check(eq, "Unaligned allocation in new space");
  }
  str(result, MemOperand(scratch1));

  // Tag and adjust back to start of new object.
  if ((flags & TAG_OBJECT) != 0) {
    sub(result, result, Operand((object_size * kPointerSize) -
                                kHeapObjectTag));
  } else {
    sub(result, result, Operand(object_size * kPointerSize));
  }
}


void MacroAssembler::AllocateInNewSpace(Register object_size,
                                        Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Label* gc_required,
                                        AllocationFlags flags) {
  ASSERT(!result.is(scratch1));
  ASSERT(!scratch1.is(scratch2));

  // Load address of new object into result and allocation top address into
  // scratch1.
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address();
  mov(scratch1, Operand(new_space_allocation_top));
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    ldr(result, MemOperand(scratch1));
  } else if (FLAG_debug_code) {
    // Assert that result actually contains top on entry. scratch2 is used
    // immediately below so this use of scratch2 does not cause difference with
    // respect to register content between debug and release mode.
    ldr(scratch2, MemOperand(scratch1));
    cmp(result, scratch2);
    Check(eq, "Unexpected allocation top");
  }

  // Calculate new top and bail out if new space is exhausted. Use result
  // to calculate the new top. Object size is in words so a shift is required
  // to get the number of bytes.
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address();
  mov(scratch2, Operand(new_space_allocation_limit));
  ldr(scratch2, MemOperand(scratch2));
  add(result, result, Operand(object_size, LSL, kPointerSizeLog2));
  cmp(result, Operand(scratch2));
  b(hi, gc_required);

  // Update allocation top. result temporarily holds the new top.
  if (FLAG_debug_code) {
    tst(result, Operand(kObjectAlignmentMask));
    Check(eq, "Unaligned allocation in new space");
  }
  str(result, MemOperand(scratch1));

  // Adjust back to start of new object.
  sub(result, result, Operand(object_size, LSL, kPointerSizeLog2));

  // Tag object if requested.
  if ((flags & TAG_OBJECT) != 0) {
    add(result, result, Operand(kHeapObjectTag));
  }
}


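// Resets the new-space allocation top to the start of 'object', undoing the
// most recent allocation. 'object' may still carry its heap object tag; the
// tag is cleared before the top is written back.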
void MacroAssembler::UndoAllocationInNewSpace(Register object,
                                              Register scratch) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address();

  // Make sure the object has no tag before resetting top.
  and_(object, object, Operand(~kHeapObjectTagMask));
#ifdef DEBUG
  // Check that the object un-allocated is below the current top.
  mov(scratch, Operand(new_space_allocation_top));
  ldr(scratch, MemOperand(scratch));
  cmp(object, scratch);
  Check(lt, "Undo allocation of non allocated memory");
#endif
  // Write the address of the object to un-allocate as the current top.
  mov(scratch, Operand(new_space_allocation_top));
  str(object, MemOperand(scratch));
}


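// CompareObjectType and CompareInstanceType only set the condition flags;
// callers branch on the result (typically with b(ne, ...)). On return 'map'
// holds the object's map and 'type_reg' its instance type, so both are
// clobbered.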
void MacroAssembler::CompareObjectType(Register function,
                                       Register map,
                                       Register type_reg,
                                       InstanceType type) {
  ldr(map, FieldMemOperand(function, HeapObject::kMapOffset));
  CompareInstanceType(map, type_reg, type);
}


void MacroAssembler::CompareInstanceType(Register map,
                                         Register type_reg,
                                         InstanceType type) {
  ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
  cmp(type_reg, Operand(type));
}


void MacroAssembler::TryGetFunctionPrototype(Register function,
                                             Register result,
                                             Register scratch,
                                             Label* miss) {
  // Check that the receiver isn't a smi.
  BranchOnSmi(function, miss);

  // Check that the function really is a function. Load map into result reg.
  CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
  b(ne, miss);

  // Make sure that the function has an instance prototype.
  Label non_instance;
  ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
  tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
  b(ne, &non_instance);

  // Get the prototype or initial map from the function.
  ldr(result,
      FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // If the prototype or initial map is the hole, don't return it and
  // simply miss the cache instead. This will allow us to allocate a
  // prototype object on-demand in the runtime system.
  LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  cmp(result, ip);
  b(eq, miss);

  // If the function does not have an initial map, we're done.
  Label done;
  CompareObjectType(result, scratch, scratch, MAP_TYPE);
  b(ne, &done);

  // Get the prototype from the initial map.
  ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
  jmp(&done);

  // Non-instance prototype: Fetch prototype from constructor field
  // in initial map.
  bind(&non_instance);
  ldr(result, FieldMemOperand(result, Map::kConstructorOffset));

  // All done.
  bind(&done);
}


void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
  ASSERT(allow_stub_calls());  // stub calls are not allowed in some stubs
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}


void MacroAssembler::StubReturn(int argc) {
  ASSERT(argc >= 1 && generating_stub());
  if (argc > 1)
    add(sp, sp, Operand((argc - 1) * kPointerSize));
  Ret();
}


void MacroAssembler::IllegalOperation(int num_arguments) {
  if (num_arguments > 0) {
    add(sp, sp, Operand(num_arguments * kPointerSize));
  }
  LoadRoot(r0, Heap::kUndefinedValueRootIndex);
}


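// Converts the smi in inReg to a double and returns it as the register pair
// (outHighReg, outLowReg). Clobbers r7, s15 and d7, and requires VFP3
// support.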
void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
                                                       Register outHighReg,
                                                       Register outLowReg) {
  // ARMv7 VFP3 instructions to implement integer to double conversion.
  mov(r7, Operand(inReg, ASR, kSmiTagSize));
  fmsr(s15, r7);
  fsitod(d7, s15);
  fmrrd(outLowReg, outHighReg, d7);
}


void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
  // All parameters are on the stack. r0 has the return value after call.

  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments match the
  // expectation.
  if (f->nargs >= 0 && f->nargs != num_arguments) {
    IllegalOperation(num_arguments);
    return;
  }

  Runtime::FunctionId function_id =
      static_cast<Runtime::FunctionId>(f->stub_id);
  RuntimeStub stub(function_id, num_arguments);
  CallStub(&stub);
}


void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
  CallRuntime(Runtime::FunctionForId(fid), num_arguments);
}


void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
                                     int num_arguments,
                                     int result_size) {
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  mov(r0, Operand(num_arguments));
  JumpToRuntime(ext);
}


void MacroAssembler::JumpToRuntime(const ExternalReference& builtin) {
#if defined(__thumb__)
  // Thumb mode builtin.
  ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
#endif
  mov(r1, Operand(builtin));
  CEntryStub stub(1);
  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}


Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
                                            bool* resolved) {
  // Contract with compiled functions is that the function is passed in r1.
  int builtins_offset =
      JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
  ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  ldr(r1, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
  ldr(r1, FieldMemOperand(r1, builtins_offset));

  return Builtins::GetCode(id, resolved);
}


void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeJSFlags flags) {
  bool resolved;
  Handle<Code> code = ResolveBuiltin(id, &resolved);

  if (flags == CALL_JS) {
    Call(code, RelocInfo::CODE_TARGET);
  } else {
    ASSERT(flags == JUMP_JS);
    Jump(code, RelocInfo::CODE_TARGET);
  }

  if (!resolved) {
    const char* name = Builtins::GetName(id);
    int argc = Builtins::GetArgumentsCount(id);
    uint32_t flags =
        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
        Bootstrapper::FixupFlagsUseCodeObject::encode(false);
    Unresolved entry = { pc_offset() - kInstrSize, flags, name };
    unresolved_.Add(entry);
  }
}


void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  bool resolved;
  Handle<Code> code = ResolveBuiltin(id, &resolved);

  mov(target, Operand(code));
  if (!resolved) {
    const char* name = Builtins::GetName(id);
    int argc = Builtins::GetArgumentsCount(id);
    uint32_t flags =
        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
        Bootstrapper::FixupFlagsUseCodeObject::encode(true);
    Unresolved entry = { pc_offset() - kInstrSize, flags, name };
    unresolved_.Add(entry);
  }

  add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
}


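// The counter helpers below only emit code when native code counters are
// enabled and the counter itself is enabled; otherwise they generate nothing.
// scratch1 and scratch2 are clobbered whenever code is emitted.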
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                Register scratch1, Register scratch2) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(scratch1, Operand(value));
    mov(scratch2, Operand(ExternalReference(counter)));
    str(scratch1, MemOperand(scratch2));
  }
}


void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(scratch2, Operand(ExternalReference(counter)));
    ldr(scratch1, MemOperand(scratch2));
    add(scratch1, scratch1, Operand(value));
    str(scratch1, MemOperand(scratch2));
  }
}


void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(scratch2, Operand(ExternalReference(counter)));
    ldr(scratch1, MemOperand(scratch2));
    sub(scratch1, scratch1, Operand(value));
    str(scratch1, MemOperand(scratch2));
  }
}


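// Assert only emits the check when debug code is enabled; Check always
// verifies the condition and calls Abort (which does not return) on failure.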
void MacroAssembler::Assert(Condition cc, const char* msg) {
  if (FLAG_debug_code)
    Check(cc, msg);
}


void MacroAssembler::Check(Condition cc, const char* msg) {
  Label L;
  b(cc, &L);
  Abort(msg);
  // will not return here
  bind(&L);
}


void MacroAssembler::Abort(const char* msg) {
  // We want to pass the msg string like a smi to avoid GC
  // problems, however msg is not guaranteed to be aligned
  // properly. Instead, we pass an aligned pointer that is
  // a proper v8 smi, but also pass the alignment difference
  // from the real pointer as a smi.
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }
#endif
  // Disable stub call restrictions to always allow calls to abort.
  set_allow_stub_calls(true);

  mov(r0, Operand(p0));
  push(r0);
  mov(r0, Operand(Smi::FromInt(p1 - p0)));
  push(r0);
  CallRuntime(Runtime::kAbort, 2);
  // will not return here
}


void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    ldr(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX)));
    // Load the function context (which is the incoming, outer context).
    ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
    for (int i = 1; i < context_chain_length; i++) {
      ldr(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
      ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
    }
    // The context may be an intermediate context, not a function context.
    ldr(dst, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
  } else {  // Slot is in the current function context.
    // The context may be an intermediate context, not a function context.
    ldr(dst, MemOperand(cp, Context::SlotOffset(Context::FCONTEXT_INDEX)));
  }
}


#ifdef ENABLE_DEBUGGER_SUPPORT
CodePatcher::CodePatcher(byte* address, int instructions)
    : address_(address),
      instructions_(instructions),
      size_(instructions * Assembler::kInstrSize),
      masm_(address, size_ + Assembler::kGap) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate
  // size bytes of instructions without failing with buffer size constraints.
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  CPU::FlushICache(address_, size_);

  // Check that the code was patched as expected.
  ASSERT(masm_.pc_ == address_ + size_);
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


void CodePatcher::Emit(Instr x) {
  masm()->emit(x);
}


void CodePatcher::Emit(Address addr) {
  masm()->emit(reinterpret_cast<Instr>(addr));
}
#endif  // ENABLE_DEBUGGER_SUPPORT


} }  // namespace v8::internal