// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <limits.h>  // For LONG_MIN, LONG_MAX.

#include "v8.h"

#if defined(V8_TARGET_ARCH_MIPS)

#include "bootstrapper.h"
#include "codegen.h"
#include "debug.h"
#include "runtime.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      allow_stub_calls_(true) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}


void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index) {
  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Condition cond,
                              Register src1, const Operand& src2) {
  Branch(2, NegateCondition(cond), src1, src2);
  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index) {
  sw(source, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index,
                               Condition cond,
                               Register src1, const Operand& src2) {
  Branch(2, NegateCondition(cond), src1, src2);
  sw(source, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::RecordWriteHelper(Register object,
                                       Register address,
                                       Register scratch) {
  if (emit_debug_code()) {
    // Check that the object is not in new space.
    Label not_in_new_space;
    InNewSpace(object, scratch, ne, &not_in_new_space);
    Abort("new-space object passed to RecordWriteHelper");
    bind(&not_in_new_space);
  }

  // Calculate page address: Clear bits from 0 to kPageSizeBits.
  if (mips32r2) {
    Ins(object, zero_reg, 0, kPageSizeBits);
  } else {
    // The Ins macro is slow on r1, so use shifts instead.
    srl(object, object, kPageSizeBits);
    sll(object, object, kPageSizeBits);
  }

  // Calculate region number.
  Ext(address, address, Page::kRegionSizeLog2,
      kPageSizeBits - Page::kRegionSizeLog2);

  // Mark region dirty.
  lw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
  li(at, Operand(1));
  sllv(at, at, address);
  or_(scratch, scratch, at);
  sw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
}
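
// For intuition only, the page/region marking above is roughly this C sketch
// (the Page constants are the ones referenced above; the pointer cast is a
// simplification):
//   uintptr_t page = object & ~((1u << kPageSizeBits) - 1);
//   int region = (address >> Page::kRegionSizeLog2) &
//                ((1 << (kPageSizeBits - Page::kRegionSizeLog2)) - 1);
//   *reinterpret_cast<uint32_t*>(page + Page::kDirtyFlagOffset) |= 1u << region;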


// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of kNumSafepointRegisters values on the
  // stack, so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  ASSERT(num_unsaved >= 0);
  Subu(sp, sp, Operand(num_unsaved * kPointerSize));
  MultiPush(kSafepointSavedRegisters);
}


void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  MultiPop(kSafepointSavedRegisters);
  Addu(sp, sp, Operand(num_unsaved * kPointerSize));
}


void MacroAssembler::PushSafepointRegistersAndDoubles() {
  PushSafepointRegisters();
  Subu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i += 2) {
    FPURegister reg = FPURegister::FromAllocationIndex(i);
    sdc1(reg, MemOperand(sp, i * kDoubleSize));
  }
}


void MacroAssembler::PopSafepointRegistersAndDoubles() {
  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i += 2) {
    FPURegister reg = FPURegister::FromAllocationIndex(i);
    ldc1(reg, MemOperand(sp, i * kDoubleSize));
  }
  Addu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
  PopSafepointRegisters();
}


void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
                                                             Register dst) {
  sw(src, SafepointRegistersAndDoublesSlot(dst));
}


void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  sw(src, SafepointRegisterSlot(dst));
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  lw(dst, SafepointRegisterSlot(src));
}


int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.
  return kSafepointRegisterStackIndexMap[reg_code];
}


MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}


MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
  // General purpose registers are pushed last on the stack.
  int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
  return MemOperand(sp, doubles_size + register_offset);
}
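
// Stack layout after PushSafepointRegistersAndDoubles (a sketch only; the
// authoritative offsets come from the two Slot helpers above):
//   [sp + 0]              saved FPU registers, kDoubleSize apart
//   [sp + doubles_size]   safepoint GP registers (lowest encodings nearest sp)
//   [above those]         slack for the unsaved GP registers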


void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cc,
                                Label* branch) {
  ASSERT(cc == eq || cc == ne);
  And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
  Branch(branch, cc, scratch,
         Operand(ExternalReference::new_space_start(isolate())));
}


// Will clobber 4 registers: object, scratch0, scratch1, at. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
                                 Operand offset,
                                 Register scratch0,
                                 Register scratch1) {
  // The compiled code assumes that record write doesn't change the
  // context register, so we check that none of the clobbered
  // registers are cp.
  ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));

  Label done;

  // First, test that the object is not in the new space. We cannot set
  // region marks for new space pages.
  InNewSpace(object, scratch0, eq, &done);

  // Add offset into the object.
  Addu(scratch0, object, offset);

  // Record the actual write.
  RecordWriteHelper(object, scratch0, scratch1);

  bind(&done);

  // Clobber all input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(object, Operand(BitCast<int32_t>(kZapValue)));
    li(scratch0, Operand(BitCast<int32_t>(kZapValue)));
    li(scratch1, Operand(BitCast<int32_t>(kZapValue)));
  }
}


// Will clobber 4 registers: object, address, scratch, at. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
                                 Register address,
                                 Register scratch) {
  // The compiled code assumes that record write doesn't change the
  // context register, so we check that none of the clobbered
  // registers are cp.
  ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));

  Label done;

  // First, test that the object is not in the new space. We cannot set
  // region marks for new space pages.
  InNewSpace(object, scratch, eq, &done);

  // Record the actual write.
  RecordWriteHelper(object, address, scratch);

  bind(&done);

  // Clobber all input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(object, Operand(BitCast<int32_t>(kZapValue)));
    li(address, Operand(BitCast<int32_t>(kZapValue)));
    li(scratch, Operand(BitCast<int32_t>(kZapValue)));
  }
}


// -----------------------------------------------------------------------------
// Allocation support.


void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch,
                                            Label* miss) {
  Label same_contexts;

  ASSERT(!holder_reg.is(scratch));
  ASSERT(!holder_reg.is(at));
  ASSERT(!scratch.is(at));

  // Load current lexical context from the stack frame.
  lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
  // In debug mode, make sure the lexical context is set.
#ifdef DEBUG
  Check(ne, "we should not have an empty lexical context",
        scratch, Operand(zero_reg));
#endif

  // Load the global context of the current context.
  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
  lw(scratch, FieldMemOperand(scratch, offset));
  lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));

  // Check the context is a global context.
  if (emit_debug_code()) {
    // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
    push(holder_reg);  // Temporarily save holder on the stack.
    // Read the first word and compare to the global_context_map.
    lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
    LoadRoot(at, Heap::kGlobalContextMapRootIndex);
    Check(eq, "JSGlobalObject::global_context should be a global context.",
          holder_reg, Operand(at));
    pop(holder_reg);  // Restore holder.
  }

  // Check if both contexts are the same.
  lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
  Branch(&same_contexts, eq, scratch, Operand(at));

  // Check the context is a global context.
  if (emit_debug_code()) {
    // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
    push(holder_reg);  // Temporarily save holder on the stack.
    mov(holder_reg, at);  // Move at to its holding place.
    LoadRoot(at, Heap::kNullValueRootIndex);
    Check(ne, "JSGlobalProxy::context() should not be null.",
          holder_reg, Operand(at));

    lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
    LoadRoot(at, Heap::kGlobalContextMapRootIndex);
    Check(eq, "JSGlobalObject::global_context should be a global context.",
          holder_reg, Operand(at));
    // Restoring at is not needed. at is reloaded below.
    pop(holder_reg);  // Restore holder.
    // Restore at to holder's context.
    lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
  }

  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.
  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;

  lw(scratch, FieldMemOperand(scratch, token_offset));
  lw(at, FieldMemOperand(at, token_offset));
  Branch(miss, ne, scratch, Operand(at));

  bind(&same_contexts);
}


void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                              Register elements,
                                              Register key,
                                              Register result,
                                              Register reg0,
                                              Register reg1,
                                              Register reg2) {
  // Register use:
  //
  // elements - holds the slow-case elements of the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'key' or 'elements'.
  //            Unchanged on bailout so 'key' or 'elements' can be used
  //            in further computation.
  //
  // Scratch registers:
  //
  // reg0 - holds the untagged key on entry and holds the hash once computed.
  //
  // reg1 - Used to hold the capacity mask of the dictionary.
  //
  // reg2 - Used for the index into the dictionary.
  // at   - Temporary (avoid MacroAssembler instructions also using 'at').
  Label done;

  // Compute the hash code from the untagged key. This must be kept in sync
  // with ComputeIntegerHash in utils.h.
  //
  // hash = ~hash + (hash << 15);
  nor(reg1, reg0, zero_reg);
  sll(at, reg0, 15);
  addu(reg0, reg1, at);

  // hash = hash ^ (hash >> 12);
  srl(at, reg0, 12);
  xor_(reg0, reg0, at);

  // hash = hash + (hash << 2);
  sll(at, reg0, 2);
  addu(reg0, reg0, at);

  // hash = hash ^ (hash >> 4);
  srl(at, reg0, 4);
  xor_(reg0, reg0, at);

  // hash = hash * 2057;
  li(reg1, Operand(2057));
  mul(reg0, reg0, reg1);

  // hash = hash ^ (hash >> 16);
  srl(at, reg0, 16);
  xor_(reg0, reg0, at);

  // Compute the capacity mask.
  lw(reg1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
  sra(reg1, reg1, kSmiTagSize);
  Subu(reg1, reg1, Operand(1));

  // Generate an unrolled loop that performs a few probes before giving up.
  static const int kProbes = 4;
  for (int i = 0; i < kProbes; i++) {
    // Use reg2 for index calculations and keep the hash intact in reg0.
    mov(reg2, reg0);
    // Compute the masked index: (hash + i + i * i) & mask.
    if (i > 0) {
      Addu(reg2, reg2, Operand(NumberDictionary::GetProbeOffset(i)));
    }
    and_(reg2, reg2, reg1);

    // Scale the index by multiplying by the element size.
    ASSERT(NumberDictionary::kEntrySize == 3);
    sll(at, reg2, 1);  // 2x.
    addu(reg2, reg2, at);  // reg2 = reg2 * 3.

    // Check if the key is identical to the name.
    sll(at, reg2, kPointerSizeLog2);
    addu(reg2, elements, at);

    lw(at, FieldMemOperand(reg2, NumberDictionary::kElementsStartOffset));
    if (i != kProbes - 1) {
      Branch(&done, eq, key, Operand(at));
    } else {
      Branch(miss, ne, key, Operand(at));
    }
  }

  bind(&done);
  // Check that the value is a normal property.
  // reg2: elements + (index * kPointerSize).
  const int kDetailsOffset =
      NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
  And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
  Branch(miss, ne, at, Operand(zero_reg));

  // Get the value at the masked, scaled index and return.
  const int kValueOffset =
      NumberDictionary::kElementsStartOffset + kPointerSize;
  lw(result, FieldMemOperand(reg2, kValueOffset));
}
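
// For intuition, the lookup above corresponds roughly to this C++ sketch
// (illustrative only; 'entries' and 'capacity' are made-up names, the real
// layout comes from the NumberDictionary constants used above):
//   uint32_t hash = ComputeIntegerHash(untagged_key);  // same mixing as above
//   uint32_t mask = capacity - 1;
//   for (int i = 0; i < kProbes; i++) {
//     uint32_t index = hash;
//     if (i > 0) index += NumberDictionary::GetProbeOffset(i);
//     index &= mask;
//     // Each entry is kEntrySize (== 3) words: key, value, details.
//     if (entries[index * 3] == key) return entries[index * 3 + 1];
//   }
//   // Otherwise: jump to the miss label.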


// ---------------------------------------------------------------------------
// Instruction macros.

void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    addu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      addiu(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      addu(rd, rs, at);
    }
  }
}


void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    subu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      addiu(rd, rs, -rt.imm32_);  // No subiu instr, use addiu(x, y, -imm).
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      subu(rd, rs, at);
    }
  }
}


void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    mul(rd, rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    mul(rd, rs, at);
  }
}


void MacroAssembler::Mult(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    mult(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    mult(rs, at);
  }
}


void MacroAssembler::Multu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    multu(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    multu(rs, at);
  }
}


void MacroAssembler::Div(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    div(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    div(rs, at);
  }
}


void MacroAssembler::Divu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    divu(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    divu(rs, at);
  }
}


void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    and_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      andi(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      and_(rd, rs, at);
    }
  }
}


void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    or_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      ori(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      or_(rd, rs, at);
    }
  }
}


void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    xor_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      xori(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      xor_(rd, rs, at);
    }
  }
}


void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    nor(rd, rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    nor(rd, rs, at);
  }
}


void MacroAssembler::Neg(Register rs, const Operand& rt) {
  ASSERT(rt.is_reg());
  ASSERT(!at.is(rs));
  ASSERT(!at.is(rt.rm()));
  li(at, -1);
  xor_(rs, rt.rm(), at);
}


void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    slt(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      slti(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      slt(rd, rs, at);
    }
  }
}


void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    sltu(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      sltiu(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      sltu(rd, rs, at);
    }
  }
}


void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
  if (mips32r2) {
    if (rt.is_reg()) {
      rotrv(rd, rs, rt.rm());
    } else {
      rotr(rd, rs, rt.imm32_);
    }
  } else {
    if (rt.is_reg()) {
      subu(at, zero_reg, rt.rm());
      sllv(at, rs, at);
      srlv(rd, rs, rt.rm());
      or_(rd, rd, at);
    } else {
      if (rt.imm32_ == 0) {
        srl(rd, rs, 0);
      } else {
        srl(at, rs, rt.imm32_);
        sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
        or_(rd, rd, at);
      }
    }
  }
}


//------------Pseudo-instructions-------------

void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
  ASSERT(!j.is_reg());
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (!MustUseReg(j.rmode_) && !gen2instr) {
    // Normal load of an immediate value which does not need Relocation Info.
    if (is_int16(j.imm32_)) {
      addiu(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & kHiMask)) {
      ori(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & kImm16Mask)) {
      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
    } else {
      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
      ori(rd, rd, (j.imm32_ & kImm16Mask));
    }
  } else if (MustUseReg(j.rmode_) || gen2instr) {
    if (MustUseReg(j.rmode_)) {
      RecordRelocInfo(j.rmode_, j.imm32_);
    }
    // We always need the same number of instructions as we may need to patch
    // this code to load another value which may need 2 instructions to load.
    lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
    ori(rd, rd, (j.imm32_ & kImm16Mask));
  }
}
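
// Example of the 2-instruction form (a sketch, not generated output): loading
// the constant 0x12345678 with no relocation info and both halves non-zero
// would emit
//   lui(rd, 0x1234);      // upper 16 bits
//   ori(rd, rd, 0x5678);  // lower 16 bits
// whereas small constants collapse to a single addiu, ori, or lui as above.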


void MacroAssembler::MultiPush(RegList regs) {
  int16_t NumSaved = 0;
  int16_t NumToPush = NumberOfBitsSet(regs);

  addiu(sp, sp, -4 * NumToPush);
  for (int16_t i = kNumRegisters; i > 0; i--) {
    if ((regs & (1 << i)) != 0) {
      sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
    }
  }
}


void MacroAssembler::MultiPushReversed(RegList regs) {
  int16_t NumSaved = 0;
  int16_t NumToPush = NumberOfBitsSet(regs);

  addiu(sp, sp, -4 * NumToPush);
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
    }
  }
}


void MacroAssembler::MultiPop(RegList regs) {
  int16_t NumSaved = 0;

  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
    }
  }
  addiu(sp, sp, 4 * NumSaved);
}


void MacroAssembler::MultiPopReversed(RegList regs) {
  int16_t NumSaved = 0;

  for (int16_t i = kNumRegisters; i > 0; i--) {
    if ((regs & (1 << i)) != 0) {
      lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
    }
  }
  addiu(sp, sp, 4 * NumSaved);
}
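
// Usage sketch (illustrative): MultiPush(a1.bit() | a2.bit()) lowers sp by 8
// and stores a1 at [sp] and a2 at [sp + 4], i.e. lower-numbered registers end
// up at lower addresses; MultiPop with the same RegList restores them.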


void MacroAssembler::Ext(Register rt,
                         Register rs,
                         uint16_t pos,
                         uint16_t size) {
  ASSERT(pos < 32);
  ASSERT(pos + size < 33);

  if (mips32r2) {
    ext_(rt, rs, pos, size);
  } else {
    // Move rs to rt and shift it left then right to get the
    // desired bitfield on the right side and zeroes on the left.
    int shift_left = 32 - (pos + size);
    sll(rt, rs, shift_left);  // Acts as a move if shift_left == 0.

    int shift_right = 32 - size;
    if (shift_right > 0) {
      srl(rt, rt, shift_right);
    }
  }
}


void MacroAssembler::Ins(Register rt,
                         Register rs,
                         uint16_t pos,
                         uint16_t size) {
  ASSERT(pos < 32);
  ASSERT(pos + size < 32);

  if (mips32r2) {
    ins_(rt, rs, pos, size);
  } else {
    ASSERT(!rt.is(t8) && !rs.is(t8));

    srl(t8, rt, pos + size);
    // The left chunk from rt that needs to
    // be saved is on the right side of t8.
    sll(at, t8, pos + size);
    // The 'at' register now contains the left chunk on
    // the left (proper position) and zeroes.
    sll(t8, rt, 32 - pos);
    // t8 now contains the right chunk on the left and zeroes.
    srl(t8, t8, 32 - pos);
    // t8 now contains the right chunk on
    // the right (proper position) and zeroes.
    or_(rt, at, t8);
    // rt now contains the left and right chunks from the original rt
    // in their proper position and zeroes in the middle.
    sll(t8, rs, 32 - size);
    // t8 now contains the chunk from rs on the left and zeroes.
    srl(t8, t8, 32 - size - pos);
    // t8 now contains the original chunk from rs in
    // the middle (proper position).
    or_(rt, rt, t8);
    // rt now contains the result of the ins instruction in R2 mode.
  }
}
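
// In plain C, the pre-R2 fallbacks above compute (sketch only):
//   Ext:  rt = (rs >> pos) & ((1u << size) - 1);
//   Ins:  mask = ((1u << size) - 1) << pos;
//         rt = (rt & ~mask) | ((rs << pos) & mask);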


void MacroAssembler::Cvt_d_uw(FPURegister fd,
                              FPURegister fs,
                              FPURegister scratch) {
  // Move the data from fs to t8.
  mfc1(t8, fs);
  Cvt_d_uw(fd, t8, scratch);
}


void MacroAssembler::Cvt_d_uw(FPURegister fd,
                              Register rs,
                              FPURegister scratch) {
  // Convert rs to a FP value in fd (and fd + 1).
  // We do this by converting rs minus the MSB to avoid sign conversion,
  // then adding 2^31 to the result (if needed).

  ASSERT(!fd.is(scratch));
  ASSERT(!rs.is(t9));
  ASSERT(!rs.is(at));

  // Save rs's MSB to t9.
  Ext(t9, rs, 31, 1);
  // Remove rs's MSB.
  Ext(at, rs, 0, 31);
  // Move the result to fd.
  mtc1(at, fd);

  // Convert fd to a real FP value.
  cvt_d_w(fd, fd);

  Label conversion_done;

  // If rs's MSB was 0, it's done.
  // Otherwise we need to add that to the FP register.
  Branch(&conversion_done, eq, t9, Operand(zero_reg));

  // Load 2^31 into scratch as its float representation.
  li(at, 0x41E00000);
  mtc1(at, FPURegister::from_code(scratch.code() + 1));
  mtc1(zero_reg, scratch);
  // Add it to fd.
  add_d(fd, fd, scratch);

  bind(&conversion_done);
}
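
// In C terms the trick above is (sketch only):
//   uint32_t u = rs;
//   double d = static_cast<double>(static_cast<int32_t>(u & 0x7FFFFFFF));
//   if (u >> 31) d += 2147483648.0;  // 2^31; its high word is 0x41E00000
// i.e. the MSB is stripped before the signed conversion and added back as a
// double afterwards.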


void MacroAssembler::Trunc_uw_d(FPURegister fd,
                                FPURegister fs,
                                FPURegister scratch) {
  Trunc_uw_d(fs, t8, scratch);
  mtc1(t8, fd);
}


void MacroAssembler::Trunc_uw_d(FPURegister fd,
                                Register rs,
                                FPURegister scratch) {
  ASSERT(!fd.is(scratch));
  ASSERT(!rs.is(at));

  // Load 2^31 into scratch as its float representation.
  li(at, 0x41E00000);
  mtc1(at, FPURegister::from_code(scratch.code() + 1));
  mtc1(zero_reg, scratch);
  // Test if scratch > fd.
  c(OLT, D, fd, scratch);

  Label simple_convert;
  // If fd < 2^31 we can convert it normally.
  bc1t(&simple_convert);

  // First we subtract 2^31 from fd, then trunc it to rs
  // and add 2^31 to rs.
  sub_d(scratch, fd, scratch);
  trunc_w_d(scratch, scratch);
  mfc1(rs, scratch);
  Or(rs, rs, 1 << 31);

  Label done;
  Branch(&done);
  // Simple conversion.
  bind(&simple_convert);
  trunc_w_d(scratch, fd);
  mfc1(rs, scratch);

  bind(&done);
}
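
// Equivalent C sketch of the truncation above (illustrative only):
//   uint32_t result;
//   if (fd < 2147483648.0) {
//     result = static_cast<uint32_t>(static_cast<int32_t>(fd));  // simple case
//   } else {
//     result = static_cast<uint32_t>(
//         static_cast<int32_t>(fd - 2147483648.0)) | (1u << 31);
//   }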


// Tries to get a signed int32 out of a double precision floating point heap
// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
// 32-bit signed integer range.
// This method implementation differs from the ARM version for performance
// reasons.
void MacroAssembler::ConvertToInt32(Register source,
                                    Register dest,
                                    Register scratch,
                                    Register scratch2,
                                    FPURegister double_scratch,
                                    Label* not_int32) {
  Label right_exponent, done;
  // Get exponent word (ENDIAN issues).
  lw(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
  // Get exponent alone in scratch2.
  And(scratch2, scratch, Operand(HeapNumber::kExponentMask));
  // Load dest with zero. We use this either for the final shift or
  // for the answer.
  mov(dest, zero_reg);
  // Check whether the exponent matches a 32 bit signed int that is not a Smi.
  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
  // the exponent that we are fastest at and also the highest exponent we can
  // handle here.
  const uint32_t non_smi_exponent =
      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
  // If we have a match of the int32-but-not-Smi exponent then skip some logic.
  Branch(&right_exponent, eq, scratch2, Operand(non_smi_exponent));
  // If the exponent is higher than that then go to not_int32 case. This
  // catches numbers that don't fit in a signed int32, infinities and NaNs.
  Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));

  // We know the exponent is smaller than 30 (biased). If it is less than
  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
  // it rounds to zero.
  const uint32_t zero_exponent =
      (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
  Subu(scratch2, scratch2, Operand(zero_exponent));
  // Dest already has a Smi zero.
  Branch(&done, lt, scratch2, Operand(zero_reg));
  if (!CpuFeatures::IsSupported(FPU)) {
    // We have a shifted exponent between 0 and 30 in scratch2.
    srl(dest, scratch2, HeapNumber::kExponentShift);
    // We now have the exponent in dest. Subtract from 30 to get
    // how much to shift down.
    li(at, Operand(30));
    subu(dest, at, dest);
  }
  bind(&right_exponent);
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // MIPS FPU instructions implementing double precision to integer
    // conversion using round to zero. Since the FP value was qualified
    // above, the resulting integer should be a legal int32.
    // The original 'Exponent' word is still in scratch.
    lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
    mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
    trunc_w_d(double_scratch, double_scratch);
    mfc1(dest, double_scratch);
  } else {
    // On entry, dest has final downshift, scratch has original sign/exp/mant.
    // Save sign bit in top bit of dest.
    And(scratch2, scratch, Operand(0x80000000));
    Or(dest, dest, Operand(scratch2));
    // Put back the implicit 1, just above mantissa field.
    Or(scratch, scratch, Operand(1 << HeapNumber::kExponentShift));

    // Shift up the mantissa bits to take up the space the exponent used to
    // take. We just orred in the implicit bit so that took care of one and
    // we want to leave the sign bit 0 so we subtract 2 bits from the shift
    // distance. But we want to clear the sign-bit so shift one more bit
    // left, then shift right one bit.
    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
    sll(scratch, scratch, shift_distance + 1);
    srl(scratch, scratch, 1);

    // Get the second half of the double. For some exponents we don't
    // actually need this because the bits get shifted out again, but
    // it's probably slower to test than just to do it.
    lw(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
    // Extract the top 10 bits, and insert those bottom 10 bits of scratch.
    // The width of the field here is the same as the shift amount above.
    const int field_width = shift_distance;
    Ext(scratch2, scratch2, 32 - shift_distance, field_width);
    Ins(scratch, scratch2, 0, field_width);
    // Move down according to the exponent.
    srlv(scratch, scratch, dest);
    // Prepare the negative version of our integer.
    subu(scratch2, zero_reg, scratch);
    // Trick to check sign bit (msb) held in dest, count leading zero.
    // 0 indicates negative, save negative version with conditional move.
    clz(dest, dest);
    movz(scratch, scratch2, dest);
    mov(dest, scratch);
  }
  bind(&done);
}
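
// The non-FPU fallback above is, in outline (sketch only):
//   frac  = mantissa of the double with the implicit 1 restored, left-aligned
//           so the implicit 1 sits in bit 30 (top bits of the low mantissa
//           word filled into the bottom of frac);
//   value = frac >> (30 - unbiased_exponent);
//   dest  = sign_bit_set ? -value : value;
// The FPU path instead rebuilds the double in double_scratch and lets
// trunc_w_d round toward zero in hardware.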


void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
                                                 Register input_high,
                                                 Register input_low,
                                                 Register scratch) {
  Label done, normal_exponent, restore_sign;
  // Extract the biased exponent in result.
  Ext(result,
      input_high,
      HeapNumber::kExponentShift,
      HeapNumber::kExponentBits);

  // Check for Infinity and NaNs, which should return 0.
  Subu(scratch, result, HeapNumber::kExponentMask);
  movz(result, zero_reg, scratch);
  Branch(&done, eq, scratch, Operand(zero_reg));

  // Express exponent as delta to (number of mantissa bits + 31).
  Subu(result,
       result,
       Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));

  // If the delta is strictly positive, all bits would be shifted away,
  // which means that we can return 0.
  Branch(&normal_exponent, le, result, Operand(zero_reg));
  mov(result, zero_reg);
  Branch(&done);

  bind(&normal_exponent);
  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
  // Calculate shift.
  Addu(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits));

  // Save the sign.
  Register sign = result;
  result = no_reg;
  And(sign, input_high, Operand(HeapNumber::kSignMask));

  // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
  // to check for this specific case.
  Label high_shift_needed, high_shift_done;
  Branch(&high_shift_needed, lt, scratch, Operand(32));
  mov(input_high, zero_reg);
  Branch(&high_shift_done);
  bind(&high_shift_needed);

  // Set the implicit 1 before the mantissa part in input_high.
  Or(input_high,
     input_high,
     Operand(1 << HeapNumber::kMantissaBitsInTopWord));
  // Shift the mantissa bits to the correct position.
  // We don't need to clear non-mantissa bits as they will be shifted away.
  // If they weren't, it would mean that the answer is in the 32bit range.
  sllv(input_high, input_high, scratch);

  bind(&high_shift_done);

  // Replace the shifted bits with bits from the lower mantissa word.
  Label pos_shift, shift_done;
  li(at, 32);
  subu(scratch, at, scratch);
  Branch(&pos_shift, ge, scratch, Operand(zero_reg));

  // Negate scratch.
  Subu(scratch, zero_reg, scratch);
  sllv(input_low, input_low, scratch);
  Branch(&shift_done);

  bind(&pos_shift);
  srlv(input_low, input_low, scratch);

  bind(&shift_done);
  Or(input_high, input_high, Operand(input_low));
  // Restore sign if necessary.
  mov(scratch, sign);
  result = sign;
  sign = no_reg;
  Subu(result, zero_reg, input_high);
  movz(result, input_high, scratch);
  bind(&done);
}


void MacroAssembler::EmitECMATruncate(Register result,
                                      FPURegister double_input,
                                      FPURegister single_scratch,
                                      Register scratch,
                                      Register input_high,
                                      Register input_low) {
  CpuFeatures::Scope scope(FPU);
  ASSERT(!input_high.is(result));
  ASSERT(!input_low.is(result));
  ASSERT(!input_low.is(input_high));
  ASSERT(!scratch.is(result) &&
         !scratch.is(input_high) &&
         !scratch.is(input_low));
  ASSERT(!single_scratch.is(double_input));

  Label done;
  Label manual;

  // Clear cumulative exception flags and save the FCSR.
  Register scratch2 = input_high;
  cfc1(scratch2, FCSR);
  ctc1(zero_reg, FCSR);
  // Try a conversion to a signed integer.
  trunc_w_d(single_scratch, double_input);
  mfc1(result, single_scratch);
  // Retrieve and restore the FCSR.
  cfc1(scratch, FCSR);
  ctc1(scratch2, FCSR);
  // Check for overflow and NaNs.
  And(scratch,
      scratch,
      kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
  // If we had no exceptions we are done.
  Branch(&done, eq, scratch, Operand(zero_reg));

  // Load the double value and perform a manual truncation.
  Move(input_low, input_high, double_input);
  EmitOutOfInt32RangeTruncate(result,
                              input_high,
                              input_low,
                              scratch);
  bind(&done);
}
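
// The overall strategy: let trunc_w_d attempt the conversion, and fall back to
// the bit-twiddling EmitOutOfInt32RangeTruncate above only when the FCSR
// reports overflow/underflow/invalid-op. That is how out-of-range doubles are
// detected without issuing extra compares on the fast path.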


void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                         Register src,
                                         int num_least_bits) {
  Ext(dst, src, kSmiTagSize, num_least_bits);
}


void MacroAssembler::GetLeastBitsFromInt32(Register dst,
                                           Register src,
                                           int num_least_bits) {
  And(dst, src, Operand((1 << num_least_bits) - 1));
}


// Emulated conditional branches do not emit a nop in the branch delay slot.
//
// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT(                                \
    (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) ||         \
    (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
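
// For example, Branch(offset, cc_always, zero_reg, Operand(zero_reg)) satisfies
// the first clause, whereas a conditional branch comparing zero_reg against
// zero_reg (which could never encode a useful condition) trips the ASSERT.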


bool MacroAssembler::UseAbsoluteCodePointers() {
  if (is_trampoline_emitted()) {
    return true;
  } else {
    return false;
  }
}


void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
  BranchShort(offset, bdslot);
}


void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
                            const Operand& rt,
                            BranchDelaySlot bdslot) {
  BranchShort(offset, cond, rs, rt, bdslot);
}


void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
  bool is_label_near = is_near(L);
  if (UseAbsoluteCodePointers() && !is_label_near) {
    Jr(L, bdslot);
  } else {
    BranchShort(L, bdslot);
  }
}


void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
                            const Operand& rt,
                            BranchDelaySlot bdslot) {
  bool is_label_near = is_near(L);
  if (UseAbsoluteCodePointers() && !is_label_near) {
    Label skip;
    Condition neg_cond = NegateCondition(cond);
    BranchShort(&skip, neg_cond, rs, rt);
    Jr(L, bdslot);
    bind(&skip);
  } else {
    BranchShort(L, cond, rs, rt, bdslot);
  }
}


void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
  b(offset);

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}


void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
                                 const Operand& rt,
                                 BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);
  ASSERT(!rs.is(zero_reg));
  Register r2 = no_reg;
  Register scratch = at;

  if (rt.is_reg()) {
    // We don't want any other register but scratch clobbered.
    ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
    r2 = rt.rm_;
    switch (cond) {
      case cc_always:
        b(offset);
        break;
      case eq:
        beq(rs, r2, offset);
        break;
      case ne:
        bne(rs, r2, offset);
        break;
      // Signed comparison.
      case greater:
        if (r2.is(zero_reg)) {
          bgtz(rs, offset);
        } else {
          slt(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (r2.is(zero_reg)) {
          bgez(rs, offset);
        } else {
          slt(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (r2.is(zero_reg)) {
          bltz(rs, offset);
        } else {
          slt(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (r2.is(zero_reg)) {
          blez(rs, offset);
        } else {
          slt(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (r2.is(zero_reg)) {
          bgtz(rs, offset);
        } else {
          sltu(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (r2.is(zero_reg)) {
          bgez(rs, offset);
        } else {
          sltu(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (r2.is(zero_reg)) {
          // No code needs to be emitted.
          return;
        } else {
          sltu(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (r2.is(zero_reg)) {
          b(offset);
        } else {
          sltu(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  } else {
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching the
    // target.
    switch (cond) {
      case cc_always:
        b(offset);
        break;
      case eq:
        // We don't want any other register but scratch clobbered.
        ASSERT(!scratch.is(rs));
        r2 = scratch;
        li(r2, rt);
        beq(rs, r2, offset);
        break;
      case ne:
        // We don't want any other register but scratch clobbered.
        ASSERT(!scratch.is(rs));
        r2 = scratch;
        li(r2, rt);
        bne(rs, r2, offset);
        break;
      // Signed comparison.
      case greater:
        if (rt.imm32_ == 0) {
          bgtz(rs, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (rt.imm32_ == 0) {
          bgez(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          beq(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (rt.imm32_ == 0) {
          bltz(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          bne(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (rt.imm32_ == 0) {
          blez(rs, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (rt.imm32_ == 0) {
          bgtz(rs, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (rt.imm32_ == 0) {
          bgez(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          beq(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (rt.imm32_ == 0) {
          // No code needs to be emitted.
          return;
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          bne(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (rt.imm32_ == 0) {
          b(offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  }
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}


void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
  // We use branch_offset as an argument for the branch instructions to be sure
  // it is called just before generating the branch instruction, as needed.

  b(shifted_branch_offset(L, false));

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}


Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001439void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
1440 const Operand& rt,
1441 BranchDelaySlot bdslot) {
Steve Block44f0eee2011-05-26 01:26:41 +01001442 BRANCH_ARGS_CHECK(cond, rs, rt);
1443
1444 int32_t offset;
1445 Register r2 = no_reg;
1446 Register scratch = at;
1447 if (rt.is_reg()) {
1448 r2 = rt.rm_;
1449 // Be careful to always use shifted_branch_offset only just before the
1450 // branch instruction, as the location will be remember for patching the
1451 // target.
1452 switch (cond) {
1453 case cc_always:
1454 offset = shifted_branch_offset(L, false);
1455 b(offset);
1456 break;
1457 case eq:
1458 offset = shifted_branch_offset(L, false);
1459 beq(rs, r2, offset);
1460 break;
1461 case ne:
1462 offset = shifted_branch_offset(L, false);
1463 bne(rs, r2, offset);
1464 break;
Ben Murdoch257744e2011-11-30 15:57:28 +00001465 // Signed comparison.
Steve Block44f0eee2011-05-26 01:26:41 +01001466 case greater:
1467 if (r2.is(zero_reg)) {
1468 offset = shifted_branch_offset(L, false);
1469 bgtz(rs, offset);
1470 } else {
1471 slt(scratch, r2, rs);
1472 offset = shifted_branch_offset(L, false);
1473 bne(scratch, zero_reg, offset);
1474 }
1475 break;
1476 case greater_equal:
1477 if (r2.is(zero_reg)) {
1478 offset = shifted_branch_offset(L, false);
1479 bgez(rs, offset);
1480 } else {
1481 slt(scratch, rs, r2);
1482 offset = shifted_branch_offset(L, false);
1483 beq(scratch, zero_reg, offset);
1484 }
1485 break;
1486 case less:
1487 if (r2.is(zero_reg)) {
1488 offset = shifted_branch_offset(L, false);
1489 bltz(rs, offset);
1490 } else {
1491 slt(scratch, rs, r2);
1492 offset = shifted_branch_offset(L, false);
1493 bne(scratch, zero_reg, offset);
1494 }
1495 break;
1496 case less_equal:
1497 if (r2.is(zero_reg)) {
1498 offset = shifted_branch_offset(L, false);
1499 blez(rs, offset);
1500 } else {
1501 slt(scratch, r2, rs);
1502 offset = shifted_branch_offset(L, false);
1503 beq(scratch, zero_reg, offset);
1504 }
1505 break;
1506 // Unsigned comparison.
1507 case Ugreater:
1508 if (r2.is(zero_reg)) {
1509 offset = shifted_branch_offset(L, false);
1510 bgtz(rs, offset);
1511 } else {
1512 sltu(scratch, r2, rs);
1513 offset = shifted_branch_offset(L, false);
1514 bne(scratch, zero_reg, offset);
1515 }
1516 break;
1517 case Ugreater_equal:
1518 if (r2.is(zero_reg)) {
1519 offset = shifted_branch_offset(L, false);
1520 bgez(rs, offset);
1521 } else {
1522 sltu(scratch, rs, r2);
1523 offset = shifted_branch_offset(L, false);
1524 beq(scratch, zero_reg, offset);
1525 }
1526 break;
1527 case Uless:
1528 if (r2.is(zero_reg)) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001529 // No code needs to be emitted.
1530 return;
Steve Block44f0eee2011-05-26 01:26:41 +01001531 } else {
1532 sltu(scratch, rs, r2);
1533 offset = shifted_branch_offset(L, false);
1534 bne(scratch, zero_reg, offset);
1535 }
1536 break;
1537 case Uless_equal:
1538 if (r2.is(zero_reg)) {
1539 offset = shifted_branch_offset(L, false);
1540 b(offset);
1541 } else {
1542 sltu(scratch, r2, rs);
1543 offset = shifted_branch_offset(L, false);
1544 beq(scratch, zero_reg, offset);
1545 }
1546 break;
1547 default:
1548 UNREACHABLE();
1549 }
1550 } else {
1551 // Be careful to always use shifted_branch_offset only just before the
1552 // branch instruction, as the location will be remember for patching the
1553 // target.
1554 switch (cond) {
1555 case cc_always:
1556 offset = shifted_branch_offset(L, false);
1557 b(offset);
1558 break;
1559 case eq:
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001560 ASSERT(!scratch.is(rs));
Steve Block44f0eee2011-05-26 01:26:41 +01001561 r2 = scratch;
1562 li(r2, rt);
1563 offset = shifted_branch_offset(L, false);
1564 beq(rs, r2, offset);
1565 break;
1566 case ne:
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001567 ASSERT(!scratch.is(rs));
Steve Block44f0eee2011-05-26 01:26:41 +01001568 r2 = scratch;
1569 li(r2, rt);
1570 offset = shifted_branch_offset(L, false);
1571 bne(rs, r2, offset);
1572 break;
Ben Murdoch257744e2011-11-30 15:57:28 +00001573 // Signed comparison.
Steve Block44f0eee2011-05-26 01:26:41 +01001574 case greater:
1575 if (rt.imm32_ == 0) {
1576 offset = shifted_branch_offset(L, false);
1577 bgtz(rs, offset);
1578 } else {
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001579 ASSERT(!scratch.is(rs));
Steve Block44f0eee2011-05-26 01:26:41 +01001580 r2 = scratch;
1581 li(r2, rt);
1582 slt(scratch, r2, rs);
1583 offset = shifted_branch_offset(L, false);
1584 bne(scratch, zero_reg, offset);
1585 }
1586 break;
1587 case greater_equal:
1588 if (rt.imm32_ == 0) {
1589 offset = shifted_branch_offset(L, false);
1590 bgez(rs, offset);
1591 } else if (is_int16(rt.imm32_)) {
1592 slti(scratch, rs, rt.imm32_);
1593 offset = shifted_branch_offset(L, false);
1594 beq(scratch, zero_reg, offset);
1595 } else {
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001596 ASSERT(!scratch.is(rs));
Steve Block44f0eee2011-05-26 01:26:41 +01001597 r2 = scratch;
1598 li(r2, rt);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001599 slt(scratch, rs, r2);
Steve Block44f0eee2011-05-26 01:26:41 +01001600 offset = shifted_branch_offset(L, false);
1601 beq(scratch, zero_reg, offset);
1602 }
1603 break;
1604 case less:
1605 if (rt.imm32_ == 0) {
1606 offset = shifted_branch_offset(L, false);
1607 bltz(rs, offset);
1608 } else if (is_int16(rt.imm32_)) {
1609 slti(scratch, rs, rt.imm32_);
1610 offset = shifted_branch_offset(L, false);
1611 bne(scratch, zero_reg, offset);
1612 } else {
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001613 ASSERT(!scratch.is(rs));
Steve Block44f0eee2011-05-26 01:26:41 +01001614 r2 = scratch;
1615 li(r2, rt);
1616 slt(scratch, rs, r2);
1617 offset = shifted_branch_offset(L, false);
1618 bne(scratch, zero_reg, offset);
1619 }
1620 break;
1621 case less_equal:
1622 if (rt.imm32_ == 0) {
1623 offset = shifted_branch_offset(L, false);
1624 blez(rs, offset);
1625 } else {
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001626 ASSERT(!scratch.is(rs));
Steve Block44f0eee2011-05-26 01:26:41 +01001627 r2 = scratch;
1628 li(r2, rt);
1629 slt(scratch, r2, rs);
1630 offset = shifted_branch_offset(L, false);
1631 beq(scratch, zero_reg, offset);
1632 }
1633 break;
1634 // Unsigned comparison.
1635 case Ugreater:
1636 if (rt.imm32_ == 0) {
1637 offset = shifted_branch_offset(L, false);
 1638        bne(rs, zero_reg, offset);  // Unsigned rs > 0 means rs != 0.
1639 } else {
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001640 ASSERT(!scratch.is(rs));
Steve Block44f0eee2011-05-26 01:26:41 +01001641 r2 = scratch;
1642 li(r2, rt);
1643 sltu(scratch, r2, rs);
1644 offset = shifted_branch_offset(L, false);
1645 bne(scratch, zero_reg, offset);
1646 }
1647 break;
1648 case Ugreater_equal:
1649 if (rt.imm32_ == 0) {
1650 offset = shifted_branch_offset(L, false);
 1651        b(offset);  // Unsigned rs >= 0 always holds.
1652 } else if (is_int16(rt.imm32_)) {
1653 sltiu(scratch, rs, rt.imm32_);
1654 offset = shifted_branch_offset(L, false);
1655 beq(scratch, zero_reg, offset);
1656 } else {
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001657 ASSERT(!scratch.is(rs));
Steve Block44f0eee2011-05-26 01:26:41 +01001658 r2 = scratch;
1659 li(r2, rt);
1660 sltu(scratch, rs, r2);
1661 offset = shifted_branch_offset(L, false);
1662 beq(scratch, zero_reg, offset);
1663 }
1664 break;
1665 case Uless:
1666 if (rt.imm32_ == 0) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001667 // No code needs to be emitted.
1668 return;
Steve Block44f0eee2011-05-26 01:26:41 +01001669 } else if (is_int16(rt.imm32_)) {
1670 sltiu(scratch, rs, rt.imm32_);
1671 offset = shifted_branch_offset(L, false);
1672 bne(scratch, zero_reg, offset);
1673 } else {
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001674 ASSERT(!scratch.is(rs));
Steve Block44f0eee2011-05-26 01:26:41 +01001675 r2 = scratch;
1676 li(r2, rt);
1677 sltu(scratch, rs, r2);
1678 offset = shifted_branch_offset(L, false);
1679 bne(scratch, zero_reg, offset);
1680 }
1681 break;
1682 case Uless_equal:
1683 if (rt.imm32_ == 0) {
1684 offset = shifted_branch_offset(L, false);
 1685        beq(rs, zero_reg, offset);  // Unsigned rs <= 0 only when rs == 0.
1686 } else {
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001687 ASSERT(!scratch.is(rs));
Steve Block44f0eee2011-05-26 01:26:41 +01001688 r2 = scratch;
1689 li(r2, rt);
1690 sltu(scratch, r2, rs);
1691 offset = shifted_branch_offset(L, false);
1692 beq(scratch, zero_reg, offset);
1693 }
1694 break;
1695 default:
1696 UNREACHABLE();
1697 }
1698 }
 1699  // Check that the offset actually fits in an int16_t.
1700 ASSERT(is_int16(offset));
1701 // Emit a nop in the branch delay slot if required.
1702 if (bdslot == PROTECT)
1703 nop();
1704}
1705
1706
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001707void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
1708 BranchAndLinkShort(offset, bdslot);
1709}
1710
1711
1712void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
1713 const Operand& rt,
1714 BranchDelaySlot bdslot) {
1715 BranchAndLinkShort(offset, cond, rs, rt, bdslot);
1716}
1717
1718
1719void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
1720 bool is_label_near = is_near(L);
1721 if (UseAbsoluteCodePointers() && !is_label_near) {
1722 Jalr(L, bdslot);
1723 } else {
1724 BranchAndLinkShort(L, bdslot);
1725 }
1726}
1727
1728
1729void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
1730 const Operand& rt,
1731 BranchDelaySlot bdslot) {
1732 bool is_label_near = is_near(L);
1733 if (UseAbsoluteCodePointers() && !is_label_near) {
1734 Label skip;
1735 Condition neg_cond = NegateCondition(cond);
1736 BranchShort(&skip, neg_cond, rs, rt);
1737 Jalr(L, bdslot);
1738 bind(&skip);
1739 } else {
1740 BranchAndLinkShort(L, cond, rs, rt, bdslot);
1741 }
1742}
1743
1744
Andrei Popescu31002712010-02-23 13:46:05 +00001745// We need to use a bgezal or bltzal, but they can't be used directly with the
1746// slt instructions. We could use sub or add instead but we would miss overflow
1747// cases, so we keep slt and add an intermediate third instruction.
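// The pattern below computes the condition with slt/sltu (leaving 0 or 1 in
// the scratch register), subtracts 1 so the result is 0 or -1, and then uses
// bgezal/bltzal to branch-and-link on the sign of that value.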
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001748void MacroAssembler::BranchAndLinkShort(int16_t offset,
1749 BranchDelaySlot bdslot) {
Steve Block44f0eee2011-05-26 01:26:41 +01001750 bal(offset);
Andrei Popescu31002712010-02-23 13:46:05 +00001751
Steve Block44f0eee2011-05-26 01:26:41 +01001752 // Emit a nop in the branch delay slot if required.
1753 if (bdslot == PROTECT)
1754 nop();
Andrei Popescu31002712010-02-23 13:46:05 +00001755}
1756
1757
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001758void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
1759 Register rs, const Operand& rt,
1760 BranchDelaySlot bdslot) {
Steve Block44f0eee2011-05-26 01:26:41 +01001761 BRANCH_ARGS_CHECK(cond, rs, rt);
Steve Block6ded16b2010-05-10 14:33:55 +01001762 Register r2 = no_reg;
Steve Block44f0eee2011-05-26 01:26:41 +01001763 Register scratch = at;
1764
Andrei Popescu31002712010-02-23 13:46:05 +00001765 if (rt.is_reg()) {
1766 r2 = rt.rm_;
1767 } else if (cond != cc_always) {
1768 r2 = scratch;
1769 li(r2, rt);
1770 }
1771
1772 switch (cond) {
1773 case cc_always:
Steve Block44f0eee2011-05-26 01:26:41 +01001774 bal(offset);
Andrei Popescu31002712010-02-23 13:46:05 +00001775 break;
1776 case eq:
1777 bne(rs, r2, 2);
1778 nop();
Steve Block44f0eee2011-05-26 01:26:41 +01001779 bal(offset);
Andrei Popescu31002712010-02-23 13:46:05 +00001780 break;
1781 case ne:
1782 beq(rs, r2, 2);
1783 nop();
Steve Block44f0eee2011-05-26 01:26:41 +01001784 bal(offset);
Andrei Popescu31002712010-02-23 13:46:05 +00001785 break;
1786
Ben Murdoch257744e2011-11-30 15:57:28 +00001787 // Signed comparison.
Andrei Popescu31002712010-02-23 13:46:05 +00001788 case greater:
1789 slt(scratch, r2, rs);
1790 addiu(scratch, scratch, -1);
Steve Block44f0eee2011-05-26 01:26:41 +01001791 bgezal(scratch, offset);
Andrei Popescu31002712010-02-23 13:46:05 +00001792 break;
1793 case greater_equal:
1794 slt(scratch, rs, r2);
1795 addiu(scratch, scratch, -1);
Steve Block44f0eee2011-05-26 01:26:41 +01001796 bltzal(scratch, offset);
Andrei Popescu31002712010-02-23 13:46:05 +00001797 break;
1798 case less:
1799 slt(scratch, rs, r2);
1800 addiu(scratch, scratch, -1);
Steve Block44f0eee2011-05-26 01:26:41 +01001801 bgezal(scratch, offset);
Andrei Popescu31002712010-02-23 13:46:05 +00001802 break;
1803 case less_equal:
1804 slt(scratch, r2, rs);
1805 addiu(scratch, scratch, -1);
Steve Block44f0eee2011-05-26 01:26:41 +01001806 bltzal(scratch, offset);
Andrei Popescu31002712010-02-23 13:46:05 +00001807 break;
1808
1809 // Unsigned comparison.
1810 case Ugreater:
1811 sltu(scratch, r2, rs);
1812 addiu(scratch, scratch, -1);
Steve Block44f0eee2011-05-26 01:26:41 +01001813 bgezal(scratch, offset);
Andrei Popescu31002712010-02-23 13:46:05 +00001814 break;
1815 case Ugreater_equal:
1816 sltu(scratch, rs, r2);
1817 addiu(scratch, scratch, -1);
Steve Block44f0eee2011-05-26 01:26:41 +01001818 bltzal(scratch, offset);
Andrei Popescu31002712010-02-23 13:46:05 +00001819 break;
1820 case Uless:
1821 sltu(scratch, rs, r2);
1822 addiu(scratch, scratch, -1);
Steve Block44f0eee2011-05-26 01:26:41 +01001823 bgezal(scratch, offset);
Andrei Popescu31002712010-02-23 13:46:05 +00001824 break;
1825 case Uless_equal:
1826 sltu(scratch, r2, rs);
1827 addiu(scratch, scratch, -1);
Steve Block44f0eee2011-05-26 01:26:41 +01001828 bltzal(scratch, offset);
Andrei Popescu31002712010-02-23 13:46:05 +00001829 break;
1830
1831 default:
1832 UNREACHABLE();
1833 }
Steve Block44f0eee2011-05-26 01:26:41 +01001834 // Emit a nop in the branch delay slot if required.
1835 if (bdslot == PROTECT)
1836 nop();
1837}
1838
1839
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001840void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
Steve Block44f0eee2011-05-26 01:26:41 +01001841 bal(shifted_branch_offset(L, false));
1842
1843 // Emit a nop in the branch delay slot if required.
1844 if (bdslot == PROTECT)
1845 nop();
1846}
1847
1848
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001849void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
1850 const Operand& rt,
1851 BranchDelaySlot bdslot) {
Steve Block44f0eee2011-05-26 01:26:41 +01001852 BRANCH_ARGS_CHECK(cond, rs, rt);
1853
1854 int32_t offset;
1855 Register r2 = no_reg;
1856 Register scratch = at;
1857 if (rt.is_reg()) {
1858 r2 = rt.rm_;
1859 } else if (cond != cc_always) {
1860 r2 = scratch;
1861 li(r2, rt);
1862 }
1863
1864 switch (cond) {
1865 case cc_always:
1866 offset = shifted_branch_offset(L, false);
1867 bal(offset);
1868 break;
1869 case eq:
1870 bne(rs, r2, 2);
1871 nop();
1872 offset = shifted_branch_offset(L, false);
1873 bal(offset);
1874 break;
1875 case ne:
1876 beq(rs, r2, 2);
1877 nop();
1878 offset = shifted_branch_offset(L, false);
1879 bal(offset);
1880 break;
1881
Ben Murdoch257744e2011-11-30 15:57:28 +00001882 // Signed comparison.
Steve Block44f0eee2011-05-26 01:26:41 +01001883 case greater:
1884 slt(scratch, r2, rs);
1885 addiu(scratch, scratch, -1);
1886 offset = shifted_branch_offset(L, false);
1887 bgezal(scratch, offset);
1888 break;
1889 case greater_equal:
1890 slt(scratch, rs, r2);
1891 addiu(scratch, scratch, -1);
1892 offset = shifted_branch_offset(L, false);
1893 bltzal(scratch, offset);
1894 break;
1895 case less:
1896 slt(scratch, rs, r2);
1897 addiu(scratch, scratch, -1);
1898 offset = shifted_branch_offset(L, false);
1899 bgezal(scratch, offset);
1900 break;
1901 case less_equal:
1902 slt(scratch, r2, rs);
1903 addiu(scratch, scratch, -1);
1904 offset = shifted_branch_offset(L, false);
1905 bltzal(scratch, offset);
1906 break;
1907
1908 // Unsigned comparison.
1909 case Ugreater:
1910 sltu(scratch, r2, rs);
1911 addiu(scratch, scratch, -1);
1912 offset = shifted_branch_offset(L, false);
1913 bgezal(scratch, offset);
1914 break;
1915 case Ugreater_equal:
1916 sltu(scratch, rs, r2);
1917 addiu(scratch, scratch, -1);
1918 offset = shifted_branch_offset(L, false);
1919 bltzal(scratch, offset);
1920 break;
1921 case Uless:
1922 sltu(scratch, rs, r2);
1923 addiu(scratch, scratch, -1);
1924 offset = shifted_branch_offset(L, false);
1925 bgezal(scratch, offset);
1926 break;
1927 case Uless_equal:
1928 sltu(scratch, r2, rs);
1929 addiu(scratch, scratch, -1);
1930 offset = shifted_branch_offset(L, false);
1931 bltzal(scratch, offset);
1932 break;
1933
1934 default:
1935 UNREACHABLE();
1936 }
1937
 1938  // Check that the offset actually fits in an int16_t.
1939 ASSERT(is_int16(offset));
1940
1941 // Emit a nop in the branch delay slot if required.
1942 if (bdslot == PROTECT)
1943 nop();
1944}
1945
1946
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001947void MacroAssembler::Jump(Register target,
Steve Block44f0eee2011-05-26 01:26:41 +01001948 Condition cond,
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001949 Register rs,
1950 const Operand& rt,
1951 BranchDelaySlot bd) {
1952 BlockTrampolinePoolScope block_trampoline_pool(this);
1953 if (cond == cc_always) {
1954 jr(target);
1955 } else {
1956 BRANCH_ARGS_CHECK(cond, rs, rt);
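    // When the condition fails, branch over the jr (and its delay slot) so
    // the jump is taken only when the condition holds.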
1957 Branch(2, NegateCondition(cond), rs, rt);
1958 jr(target);
1959 }
1960 // Emit a nop in the branch delay slot if required.
1961 if (bd == PROTECT)
1962 nop();
1963}
1964
1965
1966void MacroAssembler::Jump(intptr_t target,
1967 RelocInfo::Mode rmode,
1968 Condition cond,
1969 Register rs,
1970 const Operand& rt,
1971 BranchDelaySlot bd) {
1972 li(t9, Operand(target, rmode));
1973 Jump(t9, cond, rs, rt, bd);
1974}
1975
1976
1977void MacroAssembler::Jump(Address target,
1978 RelocInfo::Mode rmode,
1979 Condition cond,
1980 Register rs,
1981 const Operand& rt,
1982 BranchDelaySlot bd) {
1983 ASSERT(!RelocInfo::IsCodeTarget(rmode));
1984 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
1985}
1986
1987
1988void MacroAssembler::Jump(Handle<Code> code,
1989 RelocInfo::Mode rmode,
1990 Condition cond,
1991 Register rs,
1992 const Operand& rt,
1993 BranchDelaySlot bd) {
1994 ASSERT(RelocInfo::IsCodeTarget(rmode));
1995 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
1996}
1997
1998
1999int MacroAssembler::CallSize(Register target,
2000 Condition cond,
2001 Register rs,
2002 const Operand& rt,
2003 BranchDelaySlot bd) {
2004 int size = 0;
2005
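  // An unconditional call is a single jalr; a conditional call adds a branch
  // over the jalr plus that branch's delay-slot nop.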
2006 if (cond == cc_always) {
2007 size += 1;
2008 } else {
2009 size += 3;
Steve Block44f0eee2011-05-26 01:26:41 +01002010 }
2011
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002012 if (bd == PROTECT)
2013 size += 1;
Steve Block44f0eee2011-05-26 01:26:41 +01002014
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002015 return size * kInstrSize;
2016}
Steve Block44f0eee2011-05-26 01:26:41 +01002017
Steve Block44f0eee2011-05-26 01:26:41 +01002018
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002019// Note: To call gcc-compiled C code on MIPS, you must call through t9.
2020void MacroAssembler::Call(Register target,
2021 Condition cond,
2022 Register rs,
2023 const Operand& rt,
2024 BranchDelaySlot bd) {
2025 BlockTrampolinePoolScope block_trampoline_pool(this);
2026 Label start;
2027 bind(&start);
2028 if (cond == cc_always) {
2029 jalr(target);
2030 } else {
2031 BRANCH_ARGS_CHECK(cond, rs, rt);
2032 Branch(2, NegateCondition(cond), rs, rt);
2033 jalr(target);
Steve Block44f0eee2011-05-26 01:26:41 +01002034 }
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002035 // Emit a nop in the branch delay slot if required.
2036 if (bd == PROTECT)
2037 nop();
2038
2039 ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
2040 SizeOfCodeGeneratedSince(&start));
2041}
2042
2043
2044int MacroAssembler::CallSize(Address target,
2045 RelocInfo::Mode rmode,
2046 Condition cond,
2047 Register rs,
2048 const Operand& rt,
2049 BranchDelaySlot bd) {
2050 int size = CallSize(t9, cond, rs, rt, bd);
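  // Loading the 32-bit target address into t9 always takes a two-instruction
  // li (lui/ori pair), hence the two extra instructions.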
2051 return size + 2 * kInstrSize;
2052}
2053
2054
2055void MacroAssembler::Call(Address target,
2056 RelocInfo::Mode rmode,
2057 Condition cond,
2058 Register rs,
2059 const Operand& rt,
2060 BranchDelaySlot bd) {
2061 BlockTrampolinePoolScope block_trampoline_pool(this);
2062 Label start;
2063 bind(&start);
2064 int32_t target_int = reinterpret_cast<int32_t>(target);
2065 // Must record previous source positions before the
2066 // li() generates a new code target.
2067 positions_recorder()->WriteRecordedPositions();
2068 li(t9, Operand(target_int, rmode), true);
2069 Call(t9, cond, rs, rt, bd);
2070 ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
2071 SizeOfCodeGeneratedSince(&start));
2072}
2073
2074
2075int MacroAssembler::CallSize(Handle<Code> code,
2076 RelocInfo::Mode rmode,
2077 unsigned ast_id,
2078 Condition cond,
2079 Register rs,
2080 const Operand& rt,
2081 BranchDelaySlot bd) {
2082 return CallSize(reinterpret_cast<Address>(code.location()),
2083 rmode, cond, rs, rt, bd);
2084}
2085
2086
2087void MacroAssembler::Call(Handle<Code> code,
2088 RelocInfo::Mode rmode,
2089 unsigned ast_id,
2090 Condition cond,
2091 Register rs,
2092 const Operand& rt,
2093 BranchDelaySlot bd) {
2094 BlockTrampolinePoolScope block_trampoline_pool(this);
2095 Label start;
2096 bind(&start);
2097 ASSERT(RelocInfo::IsCodeTarget(rmode));
2098 if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
2099 SetRecordedAstId(ast_id);
2100 rmode = RelocInfo::CODE_TARGET_WITH_ID;
2101 }
2102 Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
2103 ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt),
2104 SizeOfCodeGeneratedSince(&start));
2105}
2106
2107
2108void MacroAssembler::Ret(Condition cond,
2109 Register rs,
2110 const Operand& rt,
2111 BranchDelaySlot bd) {
2112 Jump(ra, cond, rs, rt, bd);
2113}
2114
2115
2116void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
2117 BlockTrampolinePoolScope block_trampoline_pool(this);
2118
2119 uint32_t imm28;
2120 imm28 = jump_address(L);
2121 imm28 &= kImm28Mask;
2122 { BlockGrowBufferScope block_buf_growth(this);
2123 // Buffer growth (and relocation) must be blocked for internal references
2124 // until associated instructions are emitted and available to be patched.
2125 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2126 j(imm28);
2127 }
2128 // Emit a nop in the branch delay slot if required.
2129 if (bdslot == PROTECT)
2130 nop();
2131}
2132
2133
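// Jr and Jalr load the label's absolute address into 'at' with a lui/ori
// pair (recorded as an internal reference so the pair can be patched) and
// then jump or call through 'at', reaching targets that are out of range
// of the 16-bit offsets used by the short branches.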
2134void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
2135 BlockTrampolinePoolScope block_trampoline_pool(this);
2136
2137 uint32_t imm32;
2138 imm32 = jump_address(L);
2139 { BlockGrowBufferScope block_buf_growth(this);
2140 // Buffer growth (and relocation) must be blocked for internal references
2141 // until associated instructions are emitted and available to be patched.
2142 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2143 lui(at, (imm32 & kHiMask) >> kLuiShift);
2144 ori(at, at, (imm32 & kImm16Mask));
2145 }
2146 jr(at);
2147
2148 // Emit a nop in the branch delay slot if required.
2149 if (bdslot == PROTECT)
2150 nop();
2151}
2152
2153
2154void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
2155 BlockTrampolinePoolScope block_trampoline_pool(this);
2156
2157 uint32_t imm32;
2158 imm32 = jump_address(L);
2159 { BlockGrowBufferScope block_buf_growth(this);
2160 // Buffer growth (and relocation) must be blocked for internal references
2161 // until associated instructions are emitted and available to be patched.
2162 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2163 lui(at, (imm32 & kHiMask) >> kLuiShift);
2164 ori(at, at, (imm32 & kImm16Mask));
2165 }
2166 jalr(at);
2167
2168 // Emit a nop in the branch delay slot if required.
2169 if (bdslot == PROTECT)
2170 nop();
Steve Block44f0eee2011-05-26 01:26:41 +01002171}
2172
2173
2174void MacroAssembler::DropAndRet(int drop,
2175 Condition cond,
2176 Register r1,
2177 const Operand& r2) {
2178 // This is a workaround to make sure only one branch instruction is
2179 // generated. It relies on Drop and Ret not creating branches if
2180 // cond == cc_always.
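  // Illustrative use (not a call from this file): DropAndRet(2, eq, v0,
  // Operand(zero_reg)) would pop two words and return only when v0 == 0.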
2181 Label skip;
2182 if (cond != cc_always) {
2183 Branch(&skip, NegateCondition(cond), r1, r2);
2184 }
2185
2186 Drop(drop);
2187 Ret();
2188
2189 if (cond != cc_always) {
2190 bind(&skip);
2191 }
2192}
2193
2194
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002195void MacroAssembler::Drop(int count,
2196 Condition cond,
2197 Register reg,
2198 const Operand& op) {
2199 if (count <= 0) {
2200 return;
2201 }
2202
2203 Label skip;
2204
2205 if (cond != al) {
2206 Branch(&skip, NegateCondition(cond), reg, op);
2207 }
2208
2209 addiu(sp, sp, count * kPointerSize);
2210
2211 if (cond != al) {
2212 bind(&skip);
2213 }
2214}
2215
2216
2217
Steve Block44f0eee2011-05-26 01:26:41 +01002218void MacroAssembler::Swap(Register reg1,
2219 Register reg2,
2220 Register scratch) {
2221 if (scratch.is(no_reg)) {
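    // Three XORs exchange reg1 and reg2 in place when no scratch register
    // is available.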
2222 Xor(reg1, reg1, Operand(reg2));
2223 Xor(reg2, reg2, Operand(reg1));
2224 Xor(reg1, reg1, Operand(reg2));
2225 } else {
2226 mov(scratch, reg1);
2227 mov(reg1, reg2);
2228 mov(reg2, scratch);
2229 }
Andrei Popescu31002712010-02-23 13:46:05 +00002230}
2231
2232
2233void MacroAssembler::Call(Label* target) {
Steve Block44f0eee2011-05-26 01:26:41 +01002234 BranchAndLink(target);
2235}
2236
2237
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002238void MacroAssembler::Push(Handle<Object> handle) {
2239 li(at, Operand(handle));
2240 push(at);
2241}
2242
2243
Steve Block6ded16b2010-05-10 14:33:55 +01002244#ifdef ENABLE_DEBUGGER_SUPPORT
Steve Block6ded16b2010-05-10 14:33:55 +01002245
Steve Block44f0eee2011-05-26 01:26:41 +01002246void MacroAssembler::DebugBreak() {
2247 ASSERT(allow_stub_calls());
2248 mov(a0, zero_reg);
2249 li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
2250 CEntryStub ces(1);
2251 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
2252}
2253
2254#endif // ENABLE_DEBUGGER_SUPPORT
Steve Block6ded16b2010-05-10 14:33:55 +01002255
2256
Andrei Popescu31002712010-02-23 13:46:05 +00002257// ---------------------------------------------------------------------------
Ben Murdoch257744e2011-11-30 15:57:28 +00002258// Exception handling.
Andrei Popescu31002712010-02-23 13:46:05 +00002259
2260void MacroAssembler::PushTryHandler(CodeLocation try_location,
2261 HandlerType type) {
Steve Block6ded16b2010-05-10 14:33:55 +01002262 // Adjust this code if not the case.
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002263 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2264 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2265 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
2266 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
2267 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
2268 STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
2269
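  // A handler occupies five words on the stack; below, sp is adjusted once
  // and the fields are stored individually: next handler at the lowest
  // address, then state, context, fp and the return address (pc).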
Steve Block6ded16b2010-05-10 14:33:55 +01002270 // The return address is passed in register ra.
2271 if (try_location == IN_JAVASCRIPT) {
2272 if (type == TRY_CATCH_HANDLER) {
2273 li(t0, Operand(StackHandler::TRY_CATCH));
2274 } else {
2275 li(t0, Operand(StackHandler::TRY_FINALLY));
2276 }
Steve Block6ded16b2010-05-10 14:33:55 +01002277 // Save the current handler as the next handler.
Steve Block44f0eee2011-05-26 01:26:41 +01002278 li(t2, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
Steve Block6ded16b2010-05-10 14:33:55 +01002279 lw(t1, MemOperand(t2));
2280
2281 addiu(sp, sp, -StackHandlerConstants::kSize);
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002282 sw(ra, MemOperand(sp, StackHandlerConstants::kPCOffset));
2283 sw(fp, MemOperand(sp, StackHandlerConstants::kFPOffset));
2284 sw(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
2285 sw(t0, MemOperand(sp, StackHandlerConstants::kStateOffset));
2286 sw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset));
Steve Block6ded16b2010-05-10 14:33:55 +01002287
2288 // Link this handler as the new current one.
2289 sw(sp, MemOperand(t2));
2290
2291 } else {
2292 // Must preserve a0-a3, and s0 (argv).
2293 ASSERT(try_location == IN_JS_ENTRY);
Steve Block6ded16b2010-05-10 14:33:55 +01002294 // The frame pointer does not point to a JS frame so we save NULL
2295 // for fp. We expect the code throwing an exception to check fp
2296 // before dereferencing it to restore the context.
2297 li(t0, Operand(StackHandler::ENTRY));
2298
2299 // Save the current handler as the next handler.
Steve Block44f0eee2011-05-26 01:26:41 +01002300 li(t2, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
Steve Block6ded16b2010-05-10 14:33:55 +01002301 lw(t1, MemOperand(t2));
2302
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002303 ASSERT(Smi::FromInt(0) == 0); // Used for no context.
2304
Steve Block6ded16b2010-05-10 14:33:55 +01002305 addiu(sp, sp, -StackHandlerConstants::kSize);
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002306 sw(ra, MemOperand(sp, StackHandlerConstants::kPCOffset));
2307 sw(zero_reg, MemOperand(sp, StackHandlerConstants::kFPOffset));
2308 sw(zero_reg, MemOperand(sp, StackHandlerConstants::kContextOffset));
2309 sw(t0, MemOperand(sp, StackHandlerConstants::kStateOffset));
2310 sw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset));
Steve Block6ded16b2010-05-10 14:33:55 +01002311
2312 // Link this handler as the new current one.
2313 sw(sp, MemOperand(t2));
2314 }
Andrei Popescu31002712010-02-23 13:46:05 +00002315}
2316
2317
2318void MacroAssembler::PopTryHandler() {
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002319 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
Steve Block44f0eee2011-05-26 01:26:41 +01002320 pop(a1);
2321 Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
2322 li(at, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
2323 sw(a1, MemOperand(at));
Andrei Popescu31002712010-02-23 13:46:05 +00002324}
2325
2326
Ben Murdoch257744e2011-11-30 15:57:28 +00002327void MacroAssembler::Throw(Register value) {
2328 // v0 is expected to hold the exception.
2329 Move(v0, value);
2330
2331 // Adjust this code if not the case.
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002332 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2333 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2334 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
2335 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
2336 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
2337 STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
Ben Murdoch257744e2011-11-30 15:57:28 +00002338
2339 // Drop the sp to the top of the handler.
2340 li(a3, Operand(ExternalReference(Isolate::k_handler_address,
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002341 isolate())));
Ben Murdoch257744e2011-11-30 15:57:28 +00002342 lw(sp, MemOperand(a3));
2343
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002344 // Restore the next handler.
Ben Murdoch257744e2011-11-30 15:57:28 +00002345 pop(a2);
2346 sw(a2, MemOperand(a3));
Ben Murdoch257744e2011-11-30 15:57:28 +00002347
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002348 // Restore context and frame pointer, discard state (a3).
2349 MultiPop(a3.bit() | cp.bit() | fp.bit());
2350
2351 // If the handler is a JS frame, restore the context to the frame.
2352 // (a3 == ENTRY) == (fp == 0) == (cp == 0), so we could test any
2353 // of them.
Ben Murdoch257744e2011-11-30 15:57:28 +00002354 Label done;
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002355 Branch(&done, eq, fp, Operand(zero_reg));
2356 sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
Ben Murdoch257744e2011-11-30 15:57:28 +00002357 bind(&done);
2358
2359#ifdef DEBUG
2360 // When emitting debug_code, set ra as return address for the jump.
2361 // 5 instructions: add: 1, pop: 2, jump: 2.
2362 const int kOffsetRaInstructions = 5;
2363 Label find_ra;
2364
2365 if (emit_debug_code()) {
2366 // Compute ra for the Jump(t9).
2367 const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;
2368
 2369    // This branch-and-link sequence is needed to get the current PC on MIPS,
 2370    // saved into the ra register and then adjusted by the instruction count.
2371 bal(&find_ra); // bal exposes branch-delay.
2372 nop(); // Branch delay slot nop.
2373 bind(&find_ra);
2374 addiu(ra, ra, kOffsetRaBytes);
2375 }
2376#endif
2377
Ben Murdoch257744e2011-11-30 15:57:28 +00002378 pop(t9); // 2 instructions: lw, add sp.
2379 Jump(t9); // 2 instructions: jr, nop (in delay slot).
2380
2381 if (emit_debug_code()) {
2382 // Make sure that the expected number of instructions were generated.
2383 ASSERT_EQ(kOffsetRaInstructions,
2384 InstructionsGeneratedSince(&find_ra));
2385 }
2386}
2387
2388
2389void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
2390 Register value) {
2391 // Adjust this code if not the case.
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002392 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2393 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2394 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
2395 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
2396 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
2397 STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
Ben Murdoch257744e2011-11-30 15:57:28 +00002398
2399 // v0 is expected to hold the exception.
2400 Move(v0, value);
2401
2402 // Drop sp to the top stack handler.
2403 li(a3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
2404 lw(sp, MemOperand(a3));
2405
2406 // Unwind the handlers until the ENTRY handler is found.
2407 Label loop, done;
2408 bind(&loop);
2409 // Load the type of the current stack handler.
2410 const int kStateOffset = StackHandlerConstants::kStateOffset;
2411 lw(a2, MemOperand(sp, kStateOffset));
2412 Branch(&done, eq, a2, Operand(StackHandler::ENTRY));
2413 // Fetch the next handler in the list.
2414 const int kNextOffset = StackHandlerConstants::kNextOffset;
2415 lw(sp, MemOperand(sp, kNextOffset));
2416 jmp(&loop);
2417 bind(&done);
2418
 2419  // Set the top handler address to the next handler past the current ENTRY handler.
Ben Murdoch257744e2011-11-30 15:57:28 +00002420 pop(a2);
2421 sw(a2, MemOperand(a3));
2422
2423 if (type == OUT_OF_MEMORY) {
2424 // Set external caught exception to false.
2425 ExternalReference external_caught(
2426 Isolate::k_external_caught_exception_address, isolate());
2427 li(a0, Operand(false, RelocInfo::NONE));
2428 li(a2, Operand(external_caught));
2429 sw(a0, MemOperand(a2));
2430
2431 // Set pending exception and v0 to out of memory exception.
2432 Failure* out_of_memory = Failure::OutOfMemoryException();
2433 li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
2434 li(a2, Operand(ExternalReference(Isolate::k_pending_exception_address,
2435 isolate())));
2436 sw(v0, MemOperand(a2));
2437 }
2438
2439 // Stack layout at this point. See also StackHandlerConstants.
2440 // sp -> state (ENTRY)
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002441 // cp
Ben Murdoch257744e2011-11-30 15:57:28 +00002442 // fp
2443 // ra
2444
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002445  // Restore context and frame pointer, discard state (a2).
2446 MultiPop(a2.bit() | cp.bit() | fp.bit());
Ben Murdoch257744e2011-11-30 15:57:28 +00002447
2448#ifdef DEBUG
2449 // When emitting debug_code, set ra as return address for the jump.
2450 // 5 instructions: add: 1, pop: 2, jump: 2.
2451 const int kOffsetRaInstructions = 5;
2452 Label find_ra;
2453
2454 if (emit_debug_code()) {
2455 // Compute ra for the Jump(t9).
2456 const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;
2457
 2458    // This branch-and-link sequence is needed to get the current PC on MIPS,
 2459    // saved into the ra register and then adjusted by the instruction count.
2460 bal(&find_ra); // bal exposes branch-delay slot.
2461 nop(); // Branch delay slot nop.
2462 bind(&find_ra);
2463 addiu(ra, ra, kOffsetRaBytes);
2464 }
2465#endif
Ben Murdoch257744e2011-11-30 15:57:28 +00002466 pop(t9); // 2 instructions: lw, add sp.
2467 Jump(t9); // 2 instructions: jr, nop (in delay slot).
2468
2469 if (emit_debug_code()) {
2470 // Make sure that the expected number of instructions were generated.
2471 ASSERT_EQ(kOffsetRaInstructions,
2472 InstructionsGeneratedSince(&find_ra));
2473 }
2474}
2475
2476
Steve Block44f0eee2011-05-26 01:26:41 +01002477void MacroAssembler::AllocateInNewSpace(int object_size,
2478 Register result,
2479 Register scratch1,
2480 Register scratch2,
2481 Label* gc_required,
2482 AllocationFlags flags) {
2483 if (!FLAG_inline_new) {
Ben Murdoch257744e2011-11-30 15:57:28 +00002484 if (emit_debug_code()) {
Steve Block44f0eee2011-05-26 01:26:41 +01002485 // Trash the registers to simulate an allocation failure.
2486 li(result, 0x7091);
2487 li(scratch1, 0x7191);
2488 li(scratch2, 0x7291);
2489 }
2490 jmp(gc_required);
2491 return;
Steve Block6ded16b2010-05-10 14:33:55 +01002492 }
2493
Steve Block44f0eee2011-05-26 01:26:41 +01002494 ASSERT(!result.is(scratch1));
2495 ASSERT(!result.is(scratch2));
2496 ASSERT(!scratch1.is(scratch2));
2497 ASSERT(!scratch1.is(t9));
2498 ASSERT(!scratch2.is(t9));
2499 ASSERT(!result.is(t9));
Steve Block6ded16b2010-05-10 14:33:55 +01002500
Steve Block44f0eee2011-05-26 01:26:41 +01002501 // Make object size into bytes.
2502 if ((flags & SIZE_IN_WORDS) != 0) {
2503 object_size *= kPointerSize;
2504 }
2505 ASSERT_EQ(0, object_size & kObjectAlignmentMask);
Steve Block6ded16b2010-05-10 14:33:55 +01002506
Steve Block44f0eee2011-05-26 01:26:41 +01002507 // Check relative positions of allocation top and limit addresses.
2508 // ARM adds additional checks to make sure the ldm instruction can be
2509 // used. On MIPS we don't have ldm so we don't need additional checks either.
2510 ExternalReference new_space_allocation_top =
2511 ExternalReference::new_space_allocation_top_address(isolate());
2512 ExternalReference new_space_allocation_limit =
2513 ExternalReference::new_space_allocation_limit_address(isolate());
2514 intptr_t top =
2515 reinterpret_cast<intptr_t>(new_space_allocation_top.address());
2516 intptr_t limit =
2517 reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
2518 ASSERT((limit - top) == kPointerSize);
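  // Because the limit word sits one pointer after the top word, both can be
  // loaded below from the same base register (topaddr).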
2519
2520 // Set up allocation top address and object size registers.
2521 Register topaddr = scratch1;
2522 Register obj_size_reg = scratch2;
2523 li(topaddr, Operand(new_space_allocation_top));
2524 li(obj_size_reg, Operand(object_size));
2525
2526 // This code stores a temporary value in t9.
2527 if ((flags & RESULT_CONTAINS_TOP) == 0) {
2528 // Load allocation top into result and allocation limit into t9.
2529 lw(result, MemOperand(topaddr));
2530 lw(t9, MemOperand(topaddr, kPointerSize));
2531 } else {
Ben Murdoch257744e2011-11-30 15:57:28 +00002532 if (emit_debug_code()) {
Steve Block44f0eee2011-05-26 01:26:41 +01002533 // Assert that result actually contains top on entry. t9 is used
2534 // immediately below so this use of t9 does not cause difference with
2535 // respect to register content between debug and release mode.
2536 lw(t9, MemOperand(topaddr));
2537 Check(eq, "Unexpected allocation top", result, Operand(t9));
2538 }
2539 // Load allocation limit into t9. Result already contains allocation top.
2540 lw(t9, MemOperand(topaddr, limit - top));
2541 }
2542
2543 // Calculate new top and bail out if new space is exhausted. Use result
2544 // to calculate the new top.
2545 Addu(scratch2, result, Operand(obj_size_reg));
2546 Branch(gc_required, Ugreater, scratch2, Operand(t9));
2547 sw(scratch2, MemOperand(topaddr));
2548
2549 // Tag object if requested.
2550 if ((flags & TAG_OBJECT) != 0) {
2551 Addu(result, result, Operand(kHeapObjectTag));
2552 }
Steve Block6ded16b2010-05-10 14:33:55 +01002553}
2554
2555
Steve Block44f0eee2011-05-26 01:26:41 +01002556void MacroAssembler::AllocateInNewSpace(Register object_size,
2557 Register result,
2558 Register scratch1,
2559 Register scratch2,
2560 Label* gc_required,
2561 AllocationFlags flags) {
2562 if (!FLAG_inline_new) {
Ben Murdoch257744e2011-11-30 15:57:28 +00002563 if (emit_debug_code()) {
Steve Block44f0eee2011-05-26 01:26:41 +01002564 // Trash the registers to simulate an allocation failure.
2565 li(result, 0x7091);
2566 li(scratch1, 0x7191);
2567 li(scratch2, 0x7291);
2568 }
2569 jmp(gc_required);
2570 return;
2571 }
2572
2573 ASSERT(!result.is(scratch1));
2574 ASSERT(!result.is(scratch2));
2575 ASSERT(!scratch1.is(scratch2));
2576 ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
2577
2578 // Check relative positions of allocation top and limit addresses.
2579 // ARM adds additional checks to make sure the ldm instruction can be
2580 // used. On MIPS we don't have ldm so we don't need additional checks either.
2581 ExternalReference new_space_allocation_top =
2582 ExternalReference::new_space_allocation_top_address(isolate());
2583 ExternalReference new_space_allocation_limit =
2584 ExternalReference::new_space_allocation_limit_address(isolate());
2585 intptr_t top =
2586 reinterpret_cast<intptr_t>(new_space_allocation_top.address());
2587 intptr_t limit =
2588 reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
2589 ASSERT((limit - top) == kPointerSize);
2590
2591 // Set up allocation top address and object size registers.
2592 Register topaddr = scratch1;
2593 li(topaddr, Operand(new_space_allocation_top));
2594
2595 // This code stores a temporary value in t9.
2596 if ((flags & RESULT_CONTAINS_TOP) == 0) {
2597 // Load allocation top into result and allocation limit into t9.
2598 lw(result, MemOperand(topaddr));
2599 lw(t9, MemOperand(topaddr, kPointerSize));
2600 } else {
Ben Murdoch257744e2011-11-30 15:57:28 +00002601 if (emit_debug_code()) {
Steve Block44f0eee2011-05-26 01:26:41 +01002602 // Assert that result actually contains top on entry. t9 is used
2603 // immediately below so this use of t9 does not cause difference with
2604 // respect to register content between debug and release mode.
2605 lw(t9, MemOperand(topaddr));
2606 Check(eq, "Unexpected allocation top", result, Operand(t9));
2607 }
2608 // Load allocation limit into t9. Result already contains allocation top.
2609 lw(t9, MemOperand(topaddr, limit - top));
2610 }
2611
2612 // Calculate new top and bail out if new space is exhausted. Use result
2613 // to calculate the new top. Object size may be in words so a shift is
2614 // required to get the number of bytes.
2615 if ((flags & SIZE_IN_WORDS) != 0) {
2616 sll(scratch2, object_size, kPointerSizeLog2);
2617 Addu(scratch2, result, scratch2);
2618 } else {
2619 Addu(scratch2, result, Operand(object_size));
2620 }
2621 Branch(gc_required, Ugreater, scratch2, Operand(t9));
2622
2623 // Update allocation top. result temporarily holds the new top.
Ben Murdoch257744e2011-11-30 15:57:28 +00002624 if (emit_debug_code()) {
Steve Block44f0eee2011-05-26 01:26:41 +01002625 And(t9, scratch2, Operand(kObjectAlignmentMask));
2626 Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
2627 }
2628 sw(scratch2, MemOperand(topaddr));
2629
2630 // Tag object if requested.
2631 if ((flags & TAG_OBJECT) != 0) {
2632 Addu(result, result, Operand(kHeapObjectTag));
2633 }
2634}
2635
2636
2637void MacroAssembler::UndoAllocationInNewSpace(Register object,
2638 Register scratch) {
2639 ExternalReference new_space_allocation_top =
2640 ExternalReference::new_space_allocation_top_address(isolate());
2641
2642 // Make sure the object has no tag before resetting top.
2643 And(object, object, Operand(~kHeapObjectTagMask));
2644#ifdef DEBUG
2645 // Check that the object un-allocated is below the current top.
2646 li(scratch, Operand(new_space_allocation_top));
2647 lw(scratch, MemOperand(scratch));
2648 Check(less, "Undo allocation of non allocated memory",
2649 object, Operand(scratch));
2650#endif
2651 // Write the address of the object to un-allocate as the current top.
2652 li(scratch, Operand(new_space_allocation_top));
2653 sw(object, MemOperand(scratch));
2654}
2655
2656
2657void MacroAssembler::AllocateTwoByteString(Register result,
2658 Register length,
2659 Register scratch1,
2660 Register scratch2,
2661 Register scratch3,
2662 Label* gc_required) {
2663 // Calculate the number of bytes needed for the characters in the string while
2664 // observing object alignment.
2665 ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
2666 sll(scratch1, length, 1); // Length in bytes, not chars.
2667 addiu(scratch1, scratch1,
2668 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
2669 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
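  // scratch1 now holds SeqTwoByteString::kHeaderSize plus two bytes per
  // character, rounded up to the object alignment.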
2670
2671 // Allocate two-byte string in new space.
2672 AllocateInNewSpace(scratch1,
2673 result,
2674 scratch2,
2675 scratch3,
2676 gc_required,
2677 TAG_OBJECT);
2678
2679 // Set the map, length and hash field.
2680 InitializeNewString(result,
2681 length,
2682 Heap::kStringMapRootIndex,
2683 scratch1,
2684 scratch2);
2685}
2686
2687
2688void MacroAssembler::AllocateAsciiString(Register result,
2689 Register length,
2690 Register scratch1,
2691 Register scratch2,
2692 Register scratch3,
2693 Label* gc_required) {
2694 // Calculate the number of bytes needed for the characters in the string
2695 // while observing object alignment.
2696 ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
2697 ASSERT(kCharSize == 1);
2698 addiu(scratch1, length, kObjectAlignmentMask + SeqAsciiString::kHeaderSize);
2699 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
2700
2701 // Allocate ASCII string in new space.
2702 AllocateInNewSpace(scratch1,
2703 result,
2704 scratch2,
2705 scratch3,
2706 gc_required,
2707 TAG_OBJECT);
2708
2709 // Set the map, length and hash field.
2710 InitializeNewString(result,
2711 length,
2712 Heap::kAsciiStringMapRootIndex,
2713 scratch1,
2714 scratch2);
2715}
2716
2717
2718void MacroAssembler::AllocateTwoByteConsString(Register result,
2719 Register length,
2720 Register scratch1,
2721 Register scratch2,
2722 Label* gc_required) {
2723 AllocateInNewSpace(ConsString::kSize,
2724 result,
2725 scratch1,
2726 scratch2,
2727 gc_required,
2728 TAG_OBJECT);
2729 InitializeNewString(result,
2730 length,
2731 Heap::kConsStringMapRootIndex,
2732 scratch1,
2733 scratch2);
2734}
2735
2736
2737void MacroAssembler::AllocateAsciiConsString(Register result,
2738 Register length,
2739 Register scratch1,
2740 Register scratch2,
2741 Label* gc_required) {
2742 AllocateInNewSpace(ConsString::kSize,
2743 result,
2744 scratch1,
2745 scratch2,
2746 gc_required,
2747 TAG_OBJECT);
2748 InitializeNewString(result,
2749 length,
2750 Heap::kConsAsciiStringMapRootIndex,
2751 scratch1,
2752 scratch2);
2753}
2754
2755
2756// Allocates a heap number or jumps to the label if the young space is full and
2757// a scavenge is needed.
2758void MacroAssembler::AllocateHeapNumber(Register result,
2759 Register scratch1,
2760 Register scratch2,
2761 Register heap_number_map,
2762 Label* need_gc) {
2763 // Allocate an object in the heap for the heap number and tag it as a heap
2764 // object.
2765 AllocateInNewSpace(HeapNumber::kSize,
2766 result,
2767 scratch1,
2768 scratch2,
2769 need_gc,
2770 TAG_OBJECT);
2771
2772 // Store heap number map in the allocated object.
2773 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2774 sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
2775}
2776
2777
2778void MacroAssembler::AllocateHeapNumberWithValue(Register result,
2779 FPURegister value,
2780 Register scratch1,
2781 Register scratch2,
2782 Label* gc_required) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002783 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
2784 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
Steve Block44f0eee2011-05-26 01:26:41 +01002785 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
2786}
2787
2788
2789// Copies a fixed number of fields of heap objects from src to dst.
2790void MacroAssembler::CopyFields(Register dst,
2791 Register src,
2792 RegList temps,
2793 int field_count) {
2794 ASSERT((temps & dst.bit()) == 0);
2795 ASSERT((temps & src.bit()) == 0);
2796 // Primitive implementation using only one temporary register.
2797
2798 Register tmp = no_reg;
2799 // Find a temp register in temps list.
2800 for (int i = 0; i < kNumRegisters; i++) {
2801 if ((temps & (1 << i)) != 0) {
2802 tmp.code_ = i;
2803 break;
2804 }
2805 }
2806 ASSERT(!tmp.is(no_reg));
2807
2808 for (int i = 0; i < field_count; i++) {
2809 lw(tmp, FieldMemOperand(src, i * kPointerSize));
2810 sw(tmp, FieldMemOperand(dst, i * kPointerSize));
2811 }
2812}
2813
2814
Ben Murdoch257744e2011-11-30 15:57:28 +00002815void MacroAssembler::CopyBytes(Register src,
2816 Register dst,
2817 Register length,
2818 Register scratch) {
2819 Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
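  // Three phases: copy single bytes until src is word-aligned, then copy
  // word-sized chunks (stored byte by byte because dst may be unaligned),
  // then copy any remaining tail bytes.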
2820
2821 // Align src before copying in word size chunks.
2822 bind(&align_loop);
2823 Branch(&done, eq, length, Operand(zero_reg));
2824 bind(&align_loop_1);
2825 And(scratch, src, kPointerSize - 1);
2826 Branch(&word_loop, eq, scratch, Operand(zero_reg));
2827 lbu(scratch, MemOperand(src));
2828 Addu(src, src, 1);
2829 sb(scratch, MemOperand(dst));
2830 Addu(dst, dst, 1);
2831 Subu(length, length, Operand(1));
2832 Branch(&byte_loop_1, ne, length, Operand(zero_reg));
2833
2834 // Copy bytes in word size chunks.
2835 bind(&word_loop);
2836 if (emit_debug_code()) {
2837 And(scratch, src, kPointerSize - 1);
2838 Assert(eq, "Expecting alignment for CopyBytes",
2839 scratch, Operand(zero_reg));
2840 }
2841 Branch(&byte_loop, lt, length, Operand(kPointerSize));
2842 lw(scratch, MemOperand(src));
2843 Addu(src, src, kPointerSize);
2844
2845 // TODO(kalmard) check if this can be optimized to use sw in most cases.
2846 // Can't use unaligned access - copy byte by byte.
2847 sb(scratch, MemOperand(dst, 0));
2848 srl(scratch, scratch, 8);
2849 sb(scratch, MemOperand(dst, 1));
2850 srl(scratch, scratch, 8);
2851 sb(scratch, MemOperand(dst, 2));
2852 srl(scratch, scratch, 8);
2853 sb(scratch, MemOperand(dst, 3));
2854 Addu(dst, dst, 4);
2855
2856 Subu(length, length, Operand(kPointerSize));
2857 Branch(&word_loop);
2858
2859 // Copy the last bytes if any left.
2860 bind(&byte_loop);
2861 Branch(&done, eq, length, Operand(zero_reg));
2862 bind(&byte_loop_1);
2863 lbu(scratch, MemOperand(src));
2864 Addu(src, src, 1);
2865 sb(scratch, MemOperand(dst));
2866 Addu(dst, dst, 1);
2867 Subu(length, length, Operand(1));
2868 Branch(&byte_loop_1, ne, length, Operand(zero_reg));
2869 bind(&done);
2870}
2871
2872
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002873void MacroAssembler::CheckFastElements(Register map,
2874 Register scratch,
2875 Label* fail) {
2876 STATIC_ASSERT(JSObject::FAST_ELEMENTS == 0);
2877 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2878 Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue));
2879}
2880
2881
Steve Block44f0eee2011-05-26 01:26:41 +01002882void MacroAssembler::CheckMap(Register obj,
2883 Register scratch,
2884 Handle<Map> map,
2885 Label* fail,
Ben Murdoch257744e2011-11-30 15:57:28 +00002886 SmiCheckType smi_check_type) {
2887 if (smi_check_type == DO_SMI_CHECK) {
Steve Block44f0eee2011-05-26 01:26:41 +01002888 JumpIfSmi(obj, fail);
2889 }
2890 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2891 li(at, Operand(map));
2892 Branch(fail, ne, scratch, Operand(at));
2893}
2894
2895
Ben Murdoch257744e2011-11-30 15:57:28 +00002896void MacroAssembler::DispatchMap(Register obj,
2897 Register scratch,
2898 Handle<Map> map,
2899 Handle<Code> success,
2900 SmiCheckType smi_check_type) {
2901 Label fail;
2902 if (smi_check_type == DO_SMI_CHECK) {
2903 JumpIfSmi(obj, &fail);
2904 }
2905 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2906 Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
2907 bind(&fail);
2908}
2909
2910
Steve Block44f0eee2011-05-26 01:26:41 +01002911void MacroAssembler::CheckMap(Register obj,
2912 Register scratch,
2913 Heap::RootListIndex index,
2914 Label* fail,
Ben Murdoch257744e2011-11-30 15:57:28 +00002915 SmiCheckType smi_check_type) {
2916 if (smi_check_type == DO_SMI_CHECK) {
Steve Block44f0eee2011-05-26 01:26:41 +01002917 JumpIfSmi(obj, fail);
2918 }
2919 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2920 LoadRoot(at, index);
2921 Branch(fail, ne, scratch, Operand(at));
Steve Block6ded16b2010-05-10 14:33:55 +01002922}
2923
2924
Ben Murdoch257744e2011-11-30 15:57:28 +00002925void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
2926 CpuFeatures::Scope scope(FPU);
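  // Under the soft-float o32 ABI a double comes back in the v0/v1 register
  // pair; with hard float it is returned in f0.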
2927 if (IsMipsSoftFloatABI) {
2928 Move(dst, v0, v1);
2929 } else {
2930 Move(dst, f0); // Reg f0 is o32 ABI FP return value.
2931 }
2932}
2933
2934
2935void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
2936 CpuFeatures::Scope scope(FPU);
2937 if (!IsMipsSoftFloatABI) {
2938 Move(f12, dreg);
2939 } else {
2940 Move(a0, a1, dreg);
2941 }
2942}
2943
2944
2945void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
2946 DoubleRegister dreg2) {
2947 CpuFeatures::Scope scope(FPU);
2948 if (!IsMipsSoftFloatABI) {
2949 if (dreg2.is(f12)) {
2950 ASSERT(!dreg1.is(f14));
2951 Move(f14, dreg2);
2952 Move(f12, dreg1);
2953 } else {
2954 Move(f12, dreg1);
2955 Move(f14, dreg2);
2956 }
2957 } else {
2958 Move(a0, a1, dreg1);
2959 Move(a2, a3, dreg2);
2960 }
2961}
2962
2963
2964void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
2965 Register reg) {
2966 CpuFeatures::Scope scope(FPU);
2967 if (!IsMipsSoftFloatABI) {
2968 Move(f12, dreg);
2969 Move(a2, reg);
2970 } else {
2971 Move(a2, reg);
2972 Move(a0, a1, dreg);
2973 }
2974}
2975
2976
2977void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
2978 // This macro takes the dst register to make the code more readable
2979 // at the call sites. However, the dst register has to be t1 to
2980 // follow the calling convention which requires the call type to be
2981 // in t1.
2982 ASSERT(dst.is(t1));
2983 if (call_kind == CALL_AS_FUNCTION) {
2984 li(dst, Operand(Smi::FromInt(1)));
2985 } else {
2986 li(dst, Operand(Smi::FromInt(0)));
2987 }
2988}
2989
2990
Steve Block6ded16b2010-05-10 14:33:55 +01002991// -----------------------------------------------------------------------------
Ben Murdoch257744e2011-11-30 15:57:28 +00002992// JavaScript invokes.
Steve Block6ded16b2010-05-10 14:33:55 +01002993
2994void MacroAssembler::InvokePrologue(const ParameterCount& expected,
2995 const ParameterCount& actual,
2996 Handle<Code> code_constant,
2997 Register code_reg,
2998 Label* done,
Steve Block44f0eee2011-05-26 01:26:41 +01002999 InvokeFlag flag,
Ben Murdoch257744e2011-11-30 15:57:28 +00003000 const CallWrapper& call_wrapper,
3001 CallKind call_kind) {
Steve Block6ded16b2010-05-10 14:33:55 +01003002 bool definitely_matches = false;
3003 Label regular_invoke;
3004
3005 // Check whether the expected and actual arguments count match. If not,
 3006  // set up registers according to the contract with ArgumentsAdaptorTrampoline:
3007 // a0: actual arguments count
3008 // a1: function (passed through to callee)
3009 // a2: expected arguments count
3010 // a3: callee code entry
3011
3012 // The code below is made a lot easier because the calling code already sets
3013 // up actual and expected registers according to the contract if values are
3014 // passed in registers.
3015 ASSERT(actual.is_immediate() || actual.reg().is(a0));
3016 ASSERT(expected.is_immediate() || expected.reg().is(a2));
3017 ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
3018
3019 if (expected.is_immediate()) {
3020 ASSERT(actual.is_immediate());
3021 if (expected.immediate() == actual.immediate()) {
3022 definitely_matches = true;
3023 } else {
3024 li(a0, Operand(actual.immediate()));
3025 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3026 if (expected.immediate() == sentinel) {
3027 // Don't worry about adapting arguments for builtins that
 3028        // don't want that done. Skip adaptation code by making it look
3029 // like we have a match between expected and actual number of
3030 // arguments.
3031 definitely_matches = true;
3032 } else {
3033 li(a2, Operand(expected.immediate()));
3034 }
3035 }
Ben Murdoch257744e2011-11-30 15:57:28 +00003036 } else if (actual.is_immediate()) {
3037 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
3038 li(a0, Operand(actual.immediate()));
Steve Block6ded16b2010-05-10 14:33:55 +01003039 } else {
Ben Murdoch257744e2011-11-30 15:57:28 +00003040 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
Steve Block6ded16b2010-05-10 14:33:55 +01003041 }
3042
3043 if (!definitely_matches) {
3044 if (!code_constant.is_null()) {
3045 li(a3, Operand(code_constant));
3046 addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
3047 }
3048
Steve Block44f0eee2011-05-26 01:26:41 +01003049 Handle<Code> adaptor =
3050 isolate()->builtins()->ArgumentsAdaptorTrampoline();
Steve Block6ded16b2010-05-10 14:33:55 +01003051 if (flag == CALL_FUNCTION) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003052 call_wrapper.BeforeCall(CallSize(adaptor));
Ben Murdoch257744e2011-11-30 15:57:28 +00003053 SetCallKind(t1, call_kind);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003054 Call(adaptor);
Ben Murdoch257744e2011-11-30 15:57:28 +00003055 call_wrapper.AfterCall();
Steve Block44f0eee2011-05-26 01:26:41 +01003056 jmp(done);
Steve Block6ded16b2010-05-10 14:33:55 +01003057 } else {
Ben Murdoch257744e2011-11-30 15:57:28 +00003058 SetCallKind(t1, call_kind);
Steve Block44f0eee2011-05-26 01:26:41 +01003059 Jump(adaptor, RelocInfo::CODE_TARGET);
Steve Block6ded16b2010-05-10 14:33:55 +01003060 }
3061 bind(&regular_invoke);
3062 }
3063}
3064
Steve Block44f0eee2011-05-26 01:26:41 +01003065
Steve Block6ded16b2010-05-10 14:33:55 +01003066void MacroAssembler::InvokeCode(Register code,
3067 const ParameterCount& expected,
3068 const ParameterCount& actual,
Steve Block44f0eee2011-05-26 01:26:41 +01003069 InvokeFlag flag,
Ben Murdoch257744e2011-11-30 15:57:28 +00003070 const CallWrapper& call_wrapper,
3071 CallKind call_kind) {
Steve Block6ded16b2010-05-10 14:33:55 +01003072 Label done;
3073
Steve Block44f0eee2011-05-26 01:26:41 +01003074 InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
Ben Murdoch257744e2011-11-30 15:57:28 +00003075 call_wrapper, call_kind);
Steve Block6ded16b2010-05-10 14:33:55 +01003076 if (flag == CALL_FUNCTION) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003077 SetCallKind(t1, call_kind);
Steve Block6ded16b2010-05-10 14:33:55 +01003078 Call(code);
3079 } else {
3080 ASSERT(flag == JUMP_FUNCTION);
Ben Murdoch257744e2011-11-30 15:57:28 +00003081 SetCallKind(t1, call_kind);
Steve Block6ded16b2010-05-10 14:33:55 +01003082 Jump(code);
3083 }
3084 // Continue here if InvokePrologue does handle the invocation due to
3085 // mismatched parameter counts.
3086 bind(&done);
3087}
3088
3089
3090void MacroAssembler::InvokeCode(Handle<Code> code,
3091 const ParameterCount& expected,
3092 const ParameterCount& actual,
3093 RelocInfo::Mode rmode,
Ben Murdoch257744e2011-11-30 15:57:28 +00003094 InvokeFlag flag,
3095 CallKind call_kind) {
Steve Block6ded16b2010-05-10 14:33:55 +01003096 Label done;
3097
Ben Murdoch257744e2011-11-30 15:57:28 +00003098 InvokePrologue(expected, actual, code, no_reg, &done, flag,
3099 NullCallWrapper(), call_kind);
Steve Block6ded16b2010-05-10 14:33:55 +01003100 if (flag == CALL_FUNCTION) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003101 SetCallKind(t1, call_kind);
Steve Block6ded16b2010-05-10 14:33:55 +01003102 Call(code, rmode);
3103 } else {
Ben Murdoch257744e2011-11-30 15:57:28 +00003104 SetCallKind(t1, call_kind);
Steve Block6ded16b2010-05-10 14:33:55 +01003105 Jump(code, rmode);
3106 }
3107 // Continue here if InvokePrologue does handle the invocation due to
3108 // mismatched parameter counts.
3109 bind(&done);
3110}
3111
3112
3113void MacroAssembler::InvokeFunction(Register function,
3114 const ParameterCount& actual,
Steve Block44f0eee2011-05-26 01:26:41 +01003115 InvokeFlag flag,
Ben Murdoch257744e2011-11-30 15:57:28 +00003116 const CallWrapper& call_wrapper,
3117 CallKind call_kind) {
Steve Block6ded16b2010-05-10 14:33:55 +01003118 // Contract with called JS functions requires that function is passed in a1.
3119 ASSERT(function.is(a1));
3120 Register expected_reg = a2;
3121 Register code_reg = a3;
3122
3123 lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3124 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3125 lw(expected_reg,
3126 FieldMemOperand(code_reg,
3127 SharedFunctionInfo::kFormalParameterCountOffset));
Steve Block44f0eee2011-05-26 01:26:41 +01003128 sra(expected_reg, expected_reg, kSmiTagSize);
3129 lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
Steve Block6ded16b2010-05-10 14:33:55 +01003130
3131 ParameterCount expected(expected_reg);
Ben Murdoch257744e2011-11-30 15:57:28 +00003132 InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
Steve Block44f0eee2011-05-26 01:26:41 +01003133}
3134
3135
3136void MacroAssembler::InvokeFunction(JSFunction* function,
3137 const ParameterCount& actual,
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003138 InvokeFlag flag,
3139 CallKind call_kind) {
Steve Block44f0eee2011-05-26 01:26:41 +01003140 ASSERT(function->is_compiled());
3141
3142 // Get the function and setup the context.
3143 li(a1, Operand(Handle<JSFunction>(function)));
3144 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3145
3146 // Invoke the cached code.
3147 Handle<Code> code(function->code());
3148 ParameterCount expected(function->shared()->formal_parameter_count());
3149 if (V8::UseCrankshaft()) {
3150 UNIMPLEMENTED_MIPS();
3151 } else {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003152 InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind);
Steve Block44f0eee2011-05-26 01:26:41 +01003153 }
3154}
3155
3156
3157void MacroAssembler::IsObjectJSObjectType(Register heap_object,
3158 Register map,
3159 Register scratch,
3160 Label* fail) {
3161 lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
3162 IsInstanceJSObjectType(map, scratch, fail);
3163}
3164
3165
3166void MacroAssembler::IsInstanceJSObjectType(Register map,
3167 Register scratch,
3168 Label* fail) {
3169 lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003170 Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
3171 Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
Steve Block44f0eee2011-05-26 01:26:41 +01003172}
3173
3174
3175void MacroAssembler::IsObjectJSStringType(Register object,
3176 Register scratch,
3177 Label* fail) {
3178 ASSERT(kNotStringTag != 0);
3179
3180 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3181 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3182 And(scratch, scratch, Operand(kIsNotStringMask));
3183 Branch(fail, ne, scratch, Operand(zero_reg));
Steve Block6ded16b2010-05-10 14:33:55 +01003184}
3185
3186
3187// ---------------------------------------------------------------------------
3188// Support functions.
3189
Steve Block44f0eee2011-05-26 01:26:41 +01003190
3191void MacroAssembler::TryGetFunctionPrototype(Register function,
3192 Register result,
3193 Register scratch,
3194 Label* miss) {
3195 // Check that the receiver isn't a smi.
3196 JumpIfSmi(function, miss);
3197
3198 // Check that the function really is a function. Load map into result reg.
3199 GetObjectType(function, result, scratch);
3200 Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
3201
3202 // Make sure that the function has an instance prototype.
3203 Label non_instance;
3204 lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
3205 And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
3206 Branch(&non_instance, ne, scratch, Operand(zero_reg));
3207
3208 // Get the prototype or initial map from the function.
3209 lw(result,
3210 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3211
3212 // If the prototype or initial map is the hole, don't return it and
3213 // simply miss the cache instead. This will allow us to allocate a
3214 // prototype object on-demand in the runtime system.
3215 LoadRoot(t8, Heap::kTheHoleValueRootIndex);
3216 Branch(miss, eq, result, Operand(t8));
3217
3218 // If the function does not have an initial map, we're done.
3219 Label done;
3220 GetObjectType(result, scratch, scratch);
3221 Branch(&done, ne, scratch, Operand(MAP_TYPE));
3222
3223 // Get the prototype from the initial map.
3224 lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
3225 jmp(&done);
3226
3227 // Non-instance prototype: Fetch prototype from constructor field
3228 // in initial map.
3229 bind(&non_instance);
3230 lw(result, FieldMemOperand(result, Map::kConstructorOffset));
3231
3232 // All done.
3233 bind(&done);
3234}
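// A sketch of the intent of the sequence above (informal, derived from the
// code rather than a specification): read the function's "prototype"; maps
// flagged with kHasNonInstancePrototype take it from the constructor slot of
// the initial map, otherwise the prototype-or-initial-map field is used
// directly, or, when it still holds an initial map, that map's prototype
// field; a hole value falls through to the miss label so the runtime can
// allocate the prototype on demand.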
Steve Block6ded16b2010-05-10 14:33:55 +01003235
3236
Steve Block44f0eee2011-05-26 01:26:41 +01003237void MacroAssembler::GetObjectType(Register object,
3238 Register map,
3239 Register type_reg) {
3240 lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
3241 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3242}
Steve Block6ded16b2010-05-10 14:33:55 +01003243
3244
3245// -----------------------------------------------------------------------------
Ben Murdoch257744e2011-11-30 15:57:28 +00003246// Runtime calls.
Steve Block6ded16b2010-05-10 14:33:55 +01003247
Andrei Popescu31002712010-02-23 13:46:05 +00003248void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
3249 Register r1, const Operand& r2) {
Steve Block6ded16b2010-05-10 14:33:55 +01003250 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003251 Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2);
Andrei Popescu31002712010-02-23 13:46:05 +00003252}
3253
3254
Ben Murdoch257744e2011-11-30 15:57:28 +00003255MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond,
3256 Register r1, const Operand& r2) {
3257 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
3258 Object* result;
3259 { MaybeObject* maybe_result = stub->TryGetCode();
3260 if (!maybe_result->ToObject(&result)) return maybe_result;
3261 }
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003262 Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET,
3263 kNoASTId, cond, r1, r2);
Ben Murdoch257744e2011-11-30 15:57:28 +00003264 return result;
3265}
3266
3267
Steve Block44f0eee2011-05-26 01:26:41 +01003268void MacroAssembler::TailCallStub(CodeStub* stub) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003269 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Steve Block44f0eee2011-05-26 01:26:41 +01003270 Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
Andrei Popescu31002712010-02-23 13:46:05 +00003271}
3272
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003273
Ben Murdoch257744e2011-11-30 15:57:28 +00003274MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub,
3275 Condition cond,
3276 Register r1,
3277 const Operand& r2) {
3278 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
3279 Object* result;
3280 { MaybeObject* maybe_result = stub->TryGetCode();
3281 if (!maybe_result->ToObject(&result)) return maybe_result;
3282 }
3283 Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2);
3284 return result;
3285}
3286
3287
3288static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
3289 return ref0.address() - ref1.address();
3290}
3291
3292
3293MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
3294 ExternalReference function, int stack_space) {
3295 ExternalReference next_address =
3296 ExternalReference::handle_scope_next_address();
3297 const int kNextOffset = 0;
3298 const int kLimitOffset = AddressOffset(
3299 ExternalReference::handle_scope_limit_address(),
3300 next_address);
3301 const int kLevelOffset = AddressOffset(
3302 ExternalReference::handle_scope_level_address(),
3303 next_address);
3304
3305 // Allocate HandleScope in callee-save registers.
3306 li(s3, Operand(next_address));
3307 lw(s0, MemOperand(s3, kNextOffset));
3308 lw(s1, MemOperand(s3, kLimitOffset));
3309 lw(s2, MemOperand(s3, kLevelOffset));
3310 Addu(s2, s2, Operand(1));
3311 sw(s2, MemOperand(s3, kLevelOffset));
3312
3313 // The O32 ABI requires us to pass a pointer in a0 where the returned struct
3314 // (4 bytes) will be placed. This is also built into the Simulator.
3315 // Set up the pointer to the returned value (a0). It was allocated in
3316 // EnterExitFrame.
3317 addiu(a0, fp, ExitFrameConstants::kStackSpaceOffset);
3318
3319 // Native call returns to the DirectCEntry stub which redirects to the
3320 // return address pushed on stack (could have moved after GC).
3321 // DirectCEntry stub itself is generated early and never moves.
3322 DirectCEntryStub stub;
3323 stub.GenerateCall(this, function);
3324
3325 // As mentioned above, on MIPS a pointer is returned - we need to dereference
3326 // it to get the actual return value (which is also a pointer).
3327 lw(v0, MemOperand(v0));
3328
3329 Label promote_scheduled_exception;
3330 Label delete_allocated_handles;
3331 Label leave_exit_frame;
3332
3333 // If result is non-zero, dereference to get the result value
3334 // otherwise set it to undefined.
3335 Label skip;
3336 LoadRoot(a0, Heap::kUndefinedValueRootIndex);
3337 Branch(&skip, eq, v0, Operand(zero_reg));
3338 lw(a0, MemOperand(v0));
3339 bind(&skip);
3340 mov(v0, a0);
3341
3342 // No more valid handles (the result handle was the last one). Restore
3343 // previous handle scope.
3344 sw(s0, MemOperand(s3, kNextOffset));
3345 if (emit_debug_code()) {
3346 lw(a1, MemOperand(s3, kLevelOffset));
3347 Check(eq, "Unexpected level after return from api call", a1, Operand(s2));
3348 }
3349 Subu(s2, s2, Operand(1));
3350 sw(s2, MemOperand(s3, kLevelOffset));
3351 lw(at, MemOperand(s3, kLimitOffset));
3352 Branch(&delete_allocated_handles, ne, s1, Operand(at));
3353
3354 // Check if the function scheduled an exception.
3355 bind(&leave_exit_frame);
3356 LoadRoot(t0, Heap::kTheHoleValueRootIndex);
3357 li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
3358 lw(t1, MemOperand(at));
3359 Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
3360 li(s0, Operand(stack_space));
3361 LeaveExitFrame(false, s0);
3362 Ret();
3363
3364 bind(&promote_scheduled_exception);
3365 MaybeObject* result = TryTailCallExternalReference(
3366 ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0, 1);
3367 if (result->IsFailure()) {
3368 return result;
3369 }
3370
3371 // HandleScope limit has changed. Delete allocated extensions.
3372 bind(&delete_allocated_handles);
3373 sw(s1, MemOperand(s3, kLimitOffset));
3374 mov(s0, v0);
3375 mov(a0, v0);
3376 PrepareCallCFunction(1, s1);
3377 li(a0, Operand(ExternalReference::isolate_address()));
3378 CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
3379 1);
3380 mov(v0, s0);
3381 jmp(&leave_exit_frame);
3382
3383 return result;
3384}
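// Informal summary of the bookkeeping above: the three callee-saved
// registers mirror the isolate's handle scope data,
//   s0 <- next, s1 <- limit, s2 <- level,
// which are restored after the API call; if the callee moved the limit, the
// allocated handle scope extensions are deleted before the exit frame is
// left.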
3385
Andrei Popescu31002712010-02-23 13:46:05 +00003386
Steve Block6ded16b2010-05-10 14:33:55 +01003387void MacroAssembler::IllegalOperation(int num_arguments) {
3388 if (num_arguments > 0) {
3389 addiu(sp, sp, num_arguments * kPointerSize);
3390 }
3391 LoadRoot(v0, Heap::kUndefinedValueRootIndex);
3392}
3393
3394
Steve Block44f0eee2011-05-26 01:26:41 +01003395void MacroAssembler::IndexFromHash(Register hash,
3396 Register index) {
3397 // If the hash field contains an array index pick it out. The assert checks
3398 // that the constants for the maximum number of digits for an array index
 3399  // cached in the hash field and the number of bits reserved for it do not
3400 // conflict.
3401 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
3402 (1 << String::kArrayIndexValueBits));
 3403  // We want the smi-tagged index in the index register. The Ext instruction
 3404  // below extracts the array index field starting at bit kHashShift.
3405 STATIC_ASSERT(kSmiTag == 0);
3406 Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
3407 sll(index, hash, kSmiTagSize);
3408}
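// A sketch of the extraction above (assuming the hash field layout given by
// String::kHashShift and String::kArrayIndexValueBits):
//   index = (hash >> kHashShift) & ((1 << kArrayIndexValueBits) - 1);
//   index_smi = index << kSmiTagSize;
// the Ext instruction performs the shift-and-mask in a single step.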
3409
3410
3411void MacroAssembler::ObjectToDoubleFPURegister(Register object,
3412 FPURegister result,
3413 Register scratch1,
3414 Register scratch2,
3415 Register heap_number_map,
3416 Label* not_number,
3417 ObjectToDoubleFlags flags) {
3418 Label done;
3419 if ((flags & OBJECT_NOT_SMI) == 0) {
3420 Label not_smi;
3421 JumpIfNotSmi(object, &not_smi);
3422 // Remove smi tag and convert to double.
3423 sra(scratch1, object, kSmiTagSize);
3424 mtc1(scratch1, result);
3425 cvt_d_w(result, result);
3426 Branch(&done);
3427 bind(&not_smi);
3428 }
3429 // Check for heap number and load double value from it.
3430 lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
3431 Branch(not_number, ne, scratch1, Operand(heap_number_map));
3432
3433 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
3434 // If exponent is all ones the number is either a NaN or +/-Infinity.
3435 Register exponent = scratch1;
3436 Register mask_reg = scratch2;
3437 lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
3438 li(mask_reg, HeapNumber::kExponentMask);
3439
3440 And(exponent, exponent, mask_reg);
3441 Branch(not_number, eq, exponent, Operand(mask_reg));
3442 }
3443 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
3444 bind(&done);
3445}
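// Note on the AVOID_NANS_AND_INFINITIES path: an IEEE 754 double whose
// exponent field is all ones encodes either NaN or +/-Infinity, so comparing
// the masked exponent word against HeapNumber::kExponentMask rejects both.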
3446
3447
Steve Block44f0eee2011-05-26 01:26:41 +01003448void MacroAssembler::SmiToDoubleFPURegister(Register smi,
3449 FPURegister value,
3450 Register scratch1) {
3451 sra(scratch1, smi, kSmiTagSize);
3452 mtc1(scratch1, value);
3453 cvt_d_w(value, value);
3454}
3455
3456
Ben Murdoch257744e2011-11-30 15:57:28 +00003457void MacroAssembler::AdduAndCheckForOverflow(Register dst,
3458 Register left,
3459 Register right,
3460 Register overflow_dst,
3461 Register scratch) {
3462 ASSERT(!dst.is(overflow_dst));
3463 ASSERT(!dst.is(scratch));
3464 ASSERT(!overflow_dst.is(scratch));
3465 ASSERT(!overflow_dst.is(left));
3466 ASSERT(!overflow_dst.is(right));
3467 ASSERT(!left.is(right));
3468
Ben Murdoch257744e2011-11-30 15:57:28 +00003469 if (dst.is(left)) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003470 mov(scratch, left); // Preserve left.
3471 addu(dst, left, right); // Left is overwritten.
3472 xor_(scratch, dst, scratch); // Original left.
3473 xor_(overflow_dst, dst, right);
3474 and_(overflow_dst, overflow_dst, scratch);
Ben Murdoch257744e2011-11-30 15:57:28 +00003475 } else if (dst.is(right)) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003476 mov(scratch, right); // Preserve right.
3477 addu(dst, left, right); // Right is overwritten.
3478 xor_(scratch, dst, scratch); // Original right.
3479 xor_(overflow_dst, dst, left);
3480 and_(overflow_dst, overflow_dst, scratch);
Ben Murdoch257744e2011-11-30 15:57:28 +00003481 } else {
3482 addu(dst, left, right);
3483 xor_(overflow_dst, dst, left);
3484 xor_(scratch, dst, right);
3485 and_(overflow_dst, scratch, overflow_dst);
3486 }
3487}
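// A host-side sketch of the overflow test built above (illustration only,
// assuming 32-bit two's complement arithmetic):
//   int32_t sum = left + right;
//   bool overflow = ((sum ^ left) & (sum ^ right)) < 0;
// addition overflows exactly when both operands share a sign that the sum
// does not, which is what the xor/and sequence encodes in the sign bit of
// overflow_dst.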
3488
3489
3490void MacroAssembler::SubuAndCheckForOverflow(Register dst,
3491 Register left,
3492 Register right,
3493 Register overflow_dst,
3494 Register scratch) {
3495 ASSERT(!dst.is(overflow_dst));
3496 ASSERT(!dst.is(scratch));
3497 ASSERT(!overflow_dst.is(scratch));
3498 ASSERT(!overflow_dst.is(left));
3499 ASSERT(!overflow_dst.is(right));
3500 ASSERT(!left.is(right));
3501 ASSERT(!scratch.is(left));
3502 ASSERT(!scratch.is(right));
3503
Ben Murdoch257744e2011-11-30 15:57:28 +00003504 if (dst.is(left)) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003505 mov(scratch, left); // Preserve left.
3506 subu(dst, left, right); // Left is overwritten.
3507 xor_(overflow_dst, dst, scratch); // scratch is original left.
3508 xor_(scratch, scratch, right); // scratch is original left.
3509 and_(overflow_dst, scratch, overflow_dst);
Ben Murdoch257744e2011-11-30 15:57:28 +00003510 } else if (dst.is(right)) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003511 mov(scratch, right); // Preserve right.
3512 subu(dst, left, right); // Right is overwritten.
3513 xor_(overflow_dst, dst, left);
3514 xor_(scratch, left, scratch); // Original right.
3515 and_(overflow_dst, scratch, overflow_dst);
Ben Murdoch257744e2011-11-30 15:57:28 +00003516 } else {
3517 subu(dst, left, right);
3518 xor_(overflow_dst, dst, left);
3519 xor_(scratch, left, right);
3520 and_(overflow_dst, scratch, overflow_dst);
3521 }
3522}
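// The matching host-side sketch for subtraction (illustration only):
//   int32_t diff = left - right;
//   bool overflow = ((diff ^ left) & (left ^ right)) < 0;
// subtraction can only overflow when the operands have opposite signs and
// the result's sign differs from the minuend's.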
3523
3524
Steve Block44f0eee2011-05-26 01:26:41 +01003525void MacroAssembler::CallRuntime(const Runtime::Function* f,
3526 int num_arguments) {
Steve Block6ded16b2010-05-10 14:33:55 +01003527 // All parameters are on the stack. v0 has the return value after call.
3528
3529 // If the expected number of arguments of the runtime function is
3530 // constant, we check that the actual number of arguments match the
3531 // expectation.
3532 if (f->nargs >= 0 && f->nargs != num_arguments) {
3533 IllegalOperation(num_arguments);
3534 return;
3535 }
3536
3537 // TODO(1236192): Most runtime routines don't need the number of
3538 // arguments passed in because it is constant. At some point we
3539 // should remove this need and make the runtime routine entry code
3540 // smarter.
3541 li(a0, num_arguments);
Steve Block44f0eee2011-05-26 01:26:41 +01003542 li(a1, Operand(ExternalReference(f, isolate())));
Steve Block6ded16b2010-05-10 14:33:55 +01003543 CEntryStub stub(1);
3544 CallStub(&stub);
Andrei Popescu31002712010-02-23 13:46:05 +00003545}
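// Calling convention used above (as implied by the code, not a separate
// specification): a0 holds the argument count, a1 the ExternalReference of
// the runtime entry, the arguments themselves are already on the stack, and
// the result comes back in v0 via CEntryStub.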
3546
3547
Steve Block44f0eee2011-05-26 01:26:41 +01003548void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
3549 const Runtime::Function* function = Runtime::FunctionForId(id);
3550 li(a0, Operand(function->nargs));
3551 li(a1, Operand(ExternalReference(function, isolate())));
3552 CEntryStub stub(1);
3553 stub.SaveDoubles();
3554 CallStub(&stub);
3555}
3556
3557
Andrei Popescu31002712010-02-23 13:46:05 +00003558void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
Steve Block6ded16b2010-05-10 14:33:55 +01003559 CallRuntime(Runtime::FunctionForId(fid), num_arguments);
3560}
3561
3562
Steve Block44f0eee2011-05-26 01:26:41 +01003563void MacroAssembler::CallExternalReference(const ExternalReference& ext,
3564 int num_arguments) {
3565 li(a0, Operand(num_arguments));
3566 li(a1, Operand(ext));
3567
3568 CEntryStub stub(1);
3569 CallStub(&stub);
3570}
3571
3572
Steve Block6ded16b2010-05-10 14:33:55 +01003573void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
3574 int num_arguments,
3575 int result_size) {
Steve Block44f0eee2011-05-26 01:26:41 +01003576 // TODO(1236192): Most runtime routines don't need the number of
3577 // arguments passed in because it is constant. At some point we
3578 // should remove this need and make the runtime routine entry code
3579 // smarter.
3580 li(a0, Operand(num_arguments));
3581 JumpToExternalReference(ext);
Andrei Popescu31002712010-02-23 13:46:05 +00003582}
3583
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003584
Ben Murdoch257744e2011-11-30 15:57:28 +00003585MaybeObject* MacroAssembler::TryTailCallExternalReference(
3586 const ExternalReference& ext, int num_arguments, int result_size) {
3587 // TODO(1236192): Most runtime routines don't need the number of
3588 // arguments passed in because it is constant. At some point we
3589 // should remove this need and make the runtime routine entry code
3590 // smarter.
3591 li(a0, num_arguments);
3592 return TryJumpToExternalReference(ext);
3593}
3594
Andrei Popescu31002712010-02-23 13:46:05 +00003595
Steve Block6ded16b2010-05-10 14:33:55 +01003596void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
Andrei Popescu31002712010-02-23 13:46:05 +00003597 int num_arguments,
3598 int result_size) {
Steve Block44f0eee2011-05-26 01:26:41 +01003599 TailCallExternalReference(ExternalReference(fid, isolate()),
3600 num_arguments,
3601 result_size);
Andrei Popescu31002712010-02-23 13:46:05 +00003602}
3603
3604
Steve Block6ded16b2010-05-10 14:33:55 +01003605void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
Steve Block44f0eee2011-05-26 01:26:41 +01003606 li(a1, Operand(builtin));
3607 CEntryStub stub(1);
3608 Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
Andrei Popescu31002712010-02-23 13:46:05 +00003609}
3610
3611
Ben Murdoch257744e2011-11-30 15:57:28 +00003612MaybeObject* MacroAssembler::TryJumpToExternalReference(
3613 const ExternalReference& builtin) {
3614 li(a1, Operand(builtin));
3615 CEntryStub stub(1);
3616 return TryTailCallStub(&stub);
3617}
3618
3619
Andrei Popescu31002712010-02-23 13:46:05 +00003620void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
Ben Murdoch257744e2011-11-30 15:57:28 +00003621 InvokeFlag flag,
3622 const CallWrapper& call_wrapper) {
Steve Block44f0eee2011-05-26 01:26:41 +01003623 GetBuiltinEntry(t9, id);
Ben Murdoch257744e2011-11-30 15:57:28 +00003624 if (flag == CALL_FUNCTION) {
3625 call_wrapper.BeforeCall(CallSize(t9));
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003626 SetCallKind(t1, CALL_AS_METHOD);
Steve Block44f0eee2011-05-26 01:26:41 +01003627 Call(t9);
Ben Murdoch257744e2011-11-30 15:57:28 +00003628 call_wrapper.AfterCall();
Steve Block44f0eee2011-05-26 01:26:41 +01003629 } else {
Ben Murdoch257744e2011-11-30 15:57:28 +00003630 ASSERT(flag == JUMP_FUNCTION);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003631 SetCallKind(t1, CALL_AS_METHOD);
Steve Block44f0eee2011-05-26 01:26:41 +01003632 Jump(t9);
3633 }
3634}
3635
3636
3637void MacroAssembler::GetBuiltinFunction(Register target,
3638 Builtins::JavaScript id) {
3639 // Load the builtins object into target register.
3640 lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
3641 lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
3642 // Load the JavaScript builtin function from the builtins object.
3643 lw(target, FieldMemOperand(target,
3644 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
Andrei Popescu31002712010-02-23 13:46:05 +00003645}
3646
3647
3648void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
Steve Block44f0eee2011-05-26 01:26:41 +01003649 ASSERT(!target.is(a1));
3650 GetBuiltinFunction(a1, id);
3651 // Load the code entry point from the builtins object.
3652 lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
Andrei Popescu31002712010-02-23 13:46:05 +00003653}
3654
3655
3656void MacroAssembler::SetCounter(StatsCounter* counter, int value,
3657 Register scratch1, Register scratch2) {
Steve Block44f0eee2011-05-26 01:26:41 +01003658 if (FLAG_native_code_counters && counter->Enabled()) {
3659 li(scratch1, Operand(value));
3660 li(scratch2, Operand(ExternalReference(counter)));
3661 sw(scratch1, MemOperand(scratch2));
3662 }
Andrei Popescu31002712010-02-23 13:46:05 +00003663}
3664
3665
3666void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
3667 Register scratch1, Register scratch2) {
Steve Block44f0eee2011-05-26 01:26:41 +01003668 ASSERT(value > 0);
3669 if (FLAG_native_code_counters && counter->Enabled()) {
3670 li(scratch2, Operand(ExternalReference(counter)));
3671 lw(scratch1, MemOperand(scratch2));
3672 Addu(scratch1, scratch1, Operand(value));
3673 sw(scratch1, MemOperand(scratch2));
3674 }
Andrei Popescu31002712010-02-23 13:46:05 +00003675}
3676
3677
3678void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
3679 Register scratch1, Register scratch2) {
Steve Block44f0eee2011-05-26 01:26:41 +01003680 ASSERT(value > 0);
3681 if (FLAG_native_code_counters && counter->Enabled()) {
3682 li(scratch2, Operand(ExternalReference(counter)));
3683 lw(scratch1, MemOperand(scratch2));
3684 Subu(scratch1, scratch1, Operand(value));
3685 sw(scratch1, MemOperand(scratch2));
3686 }
Andrei Popescu31002712010-02-23 13:46:05 +00003687}
3688
3689
Steve Block6ded16b2010-05-10 14:33:55 +01003690// -----------------------------------------------------------------------------
Ben Murdoch257744e2011-11-30 15:57:28 +00003691// Debugging.
Andrei Popescu31002712010-02-23 13:46:05 +00003692
3693void MacroAssembler::Assert(Condition cc, const char* msg,
3694 Register rs, Operand rt) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003695 if (emit_debug_code())
Steve Block44f0eee2011-05-26 01:26:41 +01003696 Check(cc, msg, rs, rt);
3697}
3698
3699
3700void MacroAssembler::AssertRegisterIsRoot(Register reg,
3701 Heap::RootListIndex index) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003702 if (emit_debug_code()) {
Steve Block44f0eee2011-05-26 01:26:41 +01003703 LoadRoot(at, index);
3704 Check(eq, "Register did not match expected root", reg, Operand(at));
3705 }
3706}
3707
3708
3709void MacroAssembler::AssertFastElements(Register elements) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003710 if (emit_debug_code()) {
Steve Block44f0eee2011-05-26 01:26:41 +01003711 ASSERT(!elements.is(at));
3712 Label ok;
Ben Murdoch257744e2011-11-30 15:57:28 +00003713 push(elements);
Steve Block44f0eee2011-05-26 01:26:41 +01003714 lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
3715 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
3716 Branch(&ok, eq, elements, Operand(at));
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003717 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
3718 Branch(&ok, eq, elements, Operand(at));
Steve Block44f0eee2011-05-26 01:26:41 +01003719 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
3720 Branch(&ok, eq, elements, Operand(at));
3721 Abort("JSObject with fast elements map has slow elements");
3722 bind(&ok);
Ben Murdoch257744e2011-11-30 15:57:28 +00003723 pop(elements);
Steve Block44f0eee2011-05-26 01:26:41 +01003724 }
Andrei Popescu31002712010-02-23 13:46:05 +00003725}
3726
3727
3728void MacroAssembler::Check(Condition cc, const char* msg,
3729 Register rs, Operand rt) {
Steve Block44f0eee2011-05-26 01:26:41 +01003730 Label L;
3731 Branch(&L, cc, rs, rt);
3732 Abort(msg);
Ben Murdoch257744e2011-11-30 15:57:28 +00003733 // Will not return here.
Steve Block44f0eee2011-05-26 01:26:41 +01003734 bind(&L);
Andrei Popescu31002712010-02-23 13:46:05 +00003735}
3736
3737
3738void MacroAssembler::Abort(const char* msg) {
Steve Block44f0eee2011-05-26 01:26:41 +01003739 Label abort_start;
3740 bind(&abort_start);
3741 // We want to pass the msg string like a smi to avoid GC
3742 // problems, however msg is not guaranteed to be aligned
3743 // properly. Instead, we pass an aligned pointer that is
3744 // a proper v8 smi, but also pass the alignment difference
3745 // from the real pointer as a smi.
3746 intptr_t p1 = reinterpret_cast<intptr_t>(msg);
3747 intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
3748 ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
3749#ifdef DEBUG
3750 if (msg != NULL) {
3751 RecordComment("Abort message: ");
3752 RecordComment(msg);
3753 }
3754#endif
3755 // Disable stub call restrictions to always allow calls to abort.
3756 AllowStubCallsScope allow_scope(this, true);
3757
3758 li(a0, Operand(p0));
Ben Murdoch257744e2011-11-30 15:57:28 +00003759 push(a0);
Steve Block44f0eee2011-05-26 01:26:41 +01003760 li(a0, Operand(Smi::FromInt(p1 - p0)));
Ben Murdoch257744e2011-11-30 15:57:28 +00003761 push(a0);
Steve Block44f0eee2011-05-26 01:26:41 +01003762 CallRuntime(Runtime::kAbort, 2);
Ben Murdoch257744e2011-11-30 15:57:28 +00003763 // Will not return here.
Steve Block44f0eee2011-05-26 01:26:41 +01003764 if (is_trampoline_pool_blocked()) {
3765 // If the calling code cares about the exact number of
3766 // instructions generated, we insert padding here to keep the size
3767 // of the Abort macro constant.
3768 // Currently in debug mode with debug_code enabled the number of
3769 // generated instructions is 14, so we use this as a maximum value.
3770 static const int kExpectedAbortInstructions = 14;
3771 int abort_instructions = InstructionsGeneratedSince(&abort_start);
3772 ASSERT(abort_instructions <= kExpectedAbortInstructions);
3773 while (abort_instructions++ < kExpectedAbortInstructions) {
3774 nop();
3775 }
3776 }
3777}
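// Note on the encoding above: p0 is the message pointer with its low tag bit
// cleared, so it already looks like a valid smi, and the second pushed value
// carries the 0 or 1 byte difference p1 - p0 as a smi; the runtime can
// rebuild the unaligned char* from the pair without the GC ever seeing a raw
// pointer on the stack.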
3778
3779
3780void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
3781 if (context_chain_length > 0) {
3782 // Move up the chain of contexts to the context containing the slot.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003783 lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
Steve Block44f0eee2011-05-26 01:26:41 +01003784 for (int i = 1; i < context_chain_length; i++) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003785 lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
Steve Block44f0eee2011-05-26 01:26:41 +01003786 }
Ben Murdoch257744e2011-11-30 15:57:28 +00003787 } else {
3788 // Slot is in the current function context. Move it into the
3789 // destination register in case we store into it (the write barrier
 3790  // cannot be allowed to destroy the context in cp).
3791 Move(dst, cp);
3792 }
Steve Block44f0eee2011-05-26 01:26:41 +01003793}
3794
3795
3796void MacroAssembler::LoadGlobalFunction(int index, Register function) {
3797 // Load the global or builtins object from the current context.
3798 lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
3799 // Load the global context from the global or builtins object.
3800 lw(function, FieldMemOperand(function,
3801 GlobalObject::kGlobalContextOffset));
3802 // Load the function from the global context.
3803 lw(function, MemOperand(function, Context::SlotOffset(index)));
3804}
3805
3806
3807void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
3808 Register map,
3809 Register scratch) {
3810 // Load the initial map. The global functions all have initial maps.
3811 lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
Ben Murdoch257744e2011-11-30 15:57:28 +00003812 if (emit_debug_code()) {
Steve Block44f0eee2011-05-26 01:26:41 +01003813 Label ok, fail;
Ben Murdoch257744e2011-11-30 15:57:28 +00003814 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
Steve Block44f0eee2011-05-26 01:26:41 +01003815 Branch(&ok);
3816 bind(&fail);
3817 Abort("Global functions must have initial map");
3818 bind(&ok);
3819 }
Andrei Popescu31002712010-02-23 13:46:05 +00003820}
3821
Steve Block6ded16b2010-05-10 14:33:55 +01003822
3823void MacroAssembler::EnterFrame(StackFrame::Type type) {
3824 addiu(sp, sp, -5 * kPointerSize);
Steve Block44f0eee2011-05-26 01:26:41 +01003825 li(t8, Operand(Smi::FromInt(type)));
3826 li(t9, Operand(CodeObject()));
Steve Block6ded16b2010-05-10 14:33:55 +01003827 sw(ra, MemOperand(sp, 4 * kPointerSize));
3828 sw(fp, MemOperand(sp, 3 * kPointerSize));
3829 sw(cp, MemOperand(sp, 2 * kPointerSize));
Steve Block44f0eee2011-05-26 01:26:41 +01003830 sw(t8, MemOperand(sp, 1 * kPointerSize));
3831 sw(t9, MemOperand(sp, 0 * kPointerSize));
Steve Block6ded16b2010-05-10 14:33:55 +01003832 addiu(fp, sp, 3 * kPointerSize);
3833}
3834
3835
3836void MacroAssembler::LeaveFrame(StackFrame::Type type) {
3837 mov(sp, fp);
3838 lw(fp, MemOperand(sp, 0 * kPointerSize));
3839 lw(ra, MemOperand(sp, 1 * kPointerSize));
3840 addiu(sp, sp, 2 * kPointerSize);
3841}
3842
3843
Ben Murdoch257744e2011-11-30 15:57:28 +00003844void MacroAssembler::EnterExitFrame(bool save_doubles,
3845 int stack_space) {
 3846  // Set up the frame structure on the stack.
3847 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
3848 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
3849 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
Steve Block6ded16b2010-05-10 14:33:55 +01003850
Ben Murdoch257744e2011-11-30 15:57:28 +00003851 // This is how the stack will look:
3852 // fp + 2 (==kCallerSPDisplacement) - old stack's end
3853 // [fp + 1 (==kCallerPCOffset)] - saved old ra
3854 // [fp + 0 (==kCallerFPOffset)] - saved old fp
3855 // [fp - 1 (==kSPOffset)] - sp of the called function
3856 // [fp - 2 (==kCodeOffset)] - CodeObject
3857 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
3858 // new stack (will contain saved ra)
Steve Block6ded16b2010-05-10 14:33:55 +01003859
3860 // Save registers.
Ben Murdoch257744e2011-11-30 15:57:28 +00003861 addiu(sp, sp, -4 * kPointerSize);
3862 sw(ra, MemOperand(sp, 3 * kPointerSize));
3863 sw(fp, MemOperand(sp, 2 * kPointerSize));
 3864  addiu(fp, sp, 2 * kPointerSize);  // Set up the new frame pointer.
Steve Block6ded16b2010-05-10 14:33:55 +01003865
Ben Murdoch257744e2011-11-30 15:57:28 +00003866 if (emit_debug_code()) {
3867 sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
3868 }
3869
3870 li(t8, Operand(CodeObject())); // Accessed from ExitFrame::code_slot.
3871 sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
Steve Block6ded16b2010-05-10 14:33:55 +01003872
3873 // Save the frame pointer and the context in top.
Steve Block44f0eee2011-05-26 01:26:41 +01003874 li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
3875 sw(fp, MemOperand(t8));
3876 li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate())));
3877 sw(cp, MemOperand(t8));
Steve Block6ded16b2010-05-10 14:33:55 +01003878
Ben Murdoch257744e2011-11-30 15:57:28 +00003879 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
Steve Block44f0eee2011-05-26 01:26:41 +01003880 if (save_doubles) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003881    // The stack must be aligned to 0 modulo 8 for stores with sdc1.
Steve Block44f0eee2011-05-26 01:26:41 +01003882 ASSERT(kDoubleSize == frame_alignment);
Ben Murdoch257744e2011-11-30 15:57:28 +00003883 if (frame_alignment > 0) {
3884 ASSERT(IsPowerOf2(frame_alignment));
3885 And(sp, sp, Operand(-frame_alignment)); // Align stack.
3886 }
3887 int space = FPURegister::kNumRegisters * kDoubleSize;
Steve Block44f0eee2011-05-26 01:26:41 +01003888 Subu(sp, sp, Operand(space));
3889 // Remember: we only need to save every 2nd double FPU value.
3890 for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
3891 FPURegister reg = FPURegister::from_code(i);
Ben Murdoch257744e2011-11-30 15:57:28 +00003892 sdc1(reg, MemOperand(sp, i * kDoubleSize));
Steve Block44f0eee2011-05-26 01:26:41 +01003893 }
Steve Block44f0eee2011-05-26 01:26:41 +01003894 }
Ben Murdoch257744e2011-11-30 15:57:28 +00003895
 3896  // Reserve space for the return address, stack space and an optional slot
3897 // (used by the DirectCEntryStub to hold the return value if a struct is
3898 // returned) and align the frame preparing for calling the runtime function.
3899 ASSERT(stack_space >= 0);
3900 Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
3901 if (frame_alignment > 0) {
3902 ASSERT(IsPowerOf2(frame_alignment));
3903 And(sp, sp, Operand(-frame_alignment)); // Align stack.
3904 }
3905
3906 // Set the exit frame sp value to point just before the return address
3907 // location.
3908 addiu(at, sp, kPointerSize);
3909 sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
Steve Block6ded16b2010-05-10 14:33:55 +01003910}
3911
3912
Ben Murdoch257744e2011-11-30 15:57:28 +00003913void MacroAssembler::LeaveExitFrame(bool save_doubles,
3914 Register argument_count) {
Steve Block44f0eee2011-05-26 01:26:41 +01003915 // Optionally restore all double registers.
3916 if (save_doubles) {
Steve Block44f0eee2011-05-26 01:26:41 +01003917 // Remember: we only need to restore every 2nd double FPU value.
Ben Murdoch257744e2011-11-30 15:57:28 +00003918 lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
Steve Block44f0eee2011-05-26 01:26:41 +01003919 for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
3920 FPURegister reg = FPURegister::from_code(i);
Ben Murdoch257744e2011-11-30 15:57:28 +00003921 ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
Steve Block44f0eee2011-05-26 01:26:41 +01003922 }
3923 }
3924
Steve Block6ded16b2010-05-10 14:33:55 +01003925 // Clear top frame.
Steve Block44f0eee2011-05-26 01:26:41 +01003926 li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
3927 sw(zero_reg, MemOperand(t8));
Steve Block6ded16b2010-05-10 14:33:55 +01003928
3929 // Restore current context from top and clear it in debug mode.
Steve Block44f0eee2011-05-26 01:26:41 +01003930 li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate())));
3931 lw(cp, MemOperand(t8));
Steve Block6ded16b2010-05-10 14:33:55 +01003932#ifdef DEBUG
Steve Block44f0eee2011-05-26 01:26:41 +01003933 sw(a3, MemOperand(t8));
Steve Block6ded16b2010-05-10 14:33:55 +01003934#endif
3935
3936 // Pop the arguments, restore registers, and return.
3937 mov(sp, fp); // Respect ABI stack constraint.
Ben Murdoch257744e2011-11-30 15:57:28 +00003938 lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
3939 lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
3940 addiu(sp, sp, 8);
3941 if (argument_count.is_valid()) {
3942 sll(t8, argument_count, kPointerSizeLog2);
3943 addu(sp, sp, t8);
3944 }
Steve Block6ded16b2010-05-10 14:33:55 +01003945}
3946
3947
Steve Block44f0eee2011-05-26 01:26:41 +01003948void MacroAssembler::InitializeNewString(Register string,
3949 Register length,
3950 Heap::RootListIndex map_index,
3951 Register scratch1,
3952 Register scratch2) {
3953 sll(scratch1, length, kSmiTagSize);
3954 LoadRoot(scratch2, map_index);
3955 sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
3956 li(scratch1, Operand(String::kEmptyHashField));
3957 sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
3958 sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
3959}
3960
3961
3962int MacroAssembler::ActivationFrameAlignment() {
3963#if defined(V8_HOST_ARCH_MIPS)
3964 // Running on the real platform. Use the alignment as mandated by the local
3965 // environment.
3966 // Note: This will break if we ever start generating snapshots on one Mips
3967 // platform for another Mips platform with a different alignment.
3968 return OS::ActivationFrameAlignment();
3969#else // defined(V8_HOST_ARCH_MIPS)
3970 // If we are using the simulator then we should always align to the expected
3971 // alignment. As the simulator is used to generate snapshots we do not know
3972 // if the target platform will need alignment, so this is controlled from a
3973 // flag.
3974 return FLAG_sim_stack_alignment;
3975#endif // defined(V8_HOST_ARCH_MIPS)
3976}
3977
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003978
Ben Murdoch257744e2011-11-30 15:57:28 +00003979void MacroAssembler::AssertStackIsAligned() {
3980 if (emit_debug_code()) {
3981 const int frame_alignment = ActivationFrameAlignment();
3982 const int frame_alignment_mask = frame_alignment - 1;
Steve Block44f0eee2011-05-26 01:26:41 +01003983
Ben Murdoch257744e2011-11-30 15:57:28 +00003984 if (frame_alignment > kPointerSize) {
3985 Label alignment_as_expected;
3986 ASSERT(IsPowerOf2(frame_alignment));
3987 andi(at, sp, frame_alignment_mask);
3988 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
3989 // Don't use Check here, as it will call Runtime_Abort re-entering here.
3990 stop("Unexpected stack alignment");
3991 bind(&alignment_as_expected);
3992 }
Steve Block6ded16b2010-05-10 14:33:55 +01003993 }
Steve Block6ded16b2010-05-10 14:33:55 +01003994}
3995
Steve Block44f0eee2011-05-26 01:26:41 +01003996
Steve Block44f0eee2011-05-26 01:26:41 +01003997void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
3998 Register reg,
3999 Register scratch,
4000 Label* not_power_of_two_or_zero) {
4001 Subu(scratch, reg, Operand(1));
4002 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
4003 scratch, Operand(zero_reg));
4004 and_(at, scratch, reg); // In the delay slot.
4005 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
4006}
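// Host-side sketch of the test above (illustration only):
//   bool ok = reg > 0 && (reg & (reg - 1)) == 0;
// the first branch rejects zero and negative values via the sign of reg - 1,
// and the and_ in the delay slot computes reg & (reg - 1), which is zero
// only for powers of two.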
4007
4008
4009void MacroAssembler::JumpIfNotBothSmi(Register reg1,
4010 Register reg2,
4011 Label* on_not_both_smi) {
4012 STATIC_ASSERT(kSmiTag == 0);
4013 ASSERT_EQ(1, kSmiTagMask);
4014 or_(at, reg1, reg2);
4015 andi(at, at, kSmiTagMask);
4016 Branch(on_not_both_smi, ne, at, Operand(zero_reg));
4017}
4018
4019
4020void MacroAssembler::JumpIfEitherSmi(Register reg1,
4021 Register reg2,
4022 Label* on_either_smi) {
4023 STATIC_ASSERT(kSmiTag == 0);
4024 ASSERT_EQ(1, kSmiTagMask);
4025 // Both Smi tags must be 1 (not Smi).
4026 and_(at, reg1, reg2);
4027 andi(at, at, kSmiTagMask);
4028 Branch(on_either_smi, eq, at, Operand(zero_reg));
4029}
4030
4031
4032void MacroAssembler::AbortIfSmi(Register object) {
4033 STATIC_ASSERT(kSmiTag == 0);
4034 andi(at, object, kSmiTagMask);
4035 Assert(ne, "Operand is a smi", at, Operand(zero_reg));
4036}
4037
4038
4039void MacroAssembler::AbortIfNotSmi(Register object) {
4040 STATIC_ASSERT(kSmiTag == 0);
4041 andi(at, object, kSmiTagMask);
 4042  Assert(eq, "Operand is not a smi", at, Operand(zero_reg));
4043}
4044
4045
Ben Murdoch257744e2011-11-30 15:57:28 +00004046void MacroAssembler::AbortIfNotString(Register object) {
4047 STATIC_ASSERT(kSmiTag == 0);
4048 And(t0, object, Operand(kSmiTagMask));
4049 Assert(ne, "Operand is not a string", t0, Operand(zero_reg));
4050 push(object);
4051 lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
4052 lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
4053 Assert(lo, "Operand is not a string", object, Operand(FIRST_NONSTRING_TYPE));
4054 pop(object);
4055}
4056
4057
Steve Block44f0eee2011-05-26 01:26:41 +01004058void MacroAssembler::AbortIfNotRootValue(Register src,
4059 Heap::RootListIndex root_value_index,
4060 const char* message) {
4061 ASSERT(!src.is(at));
4062 LoadRoot(at, root_value_index);
4063 Assert(eq, message, src, Operand(at));
4064}
4065
4066
4067void MacroAssembler::JumpIfNotHeapNumber(Register object,
4068 Register heap_number_map,
4069 Register scratch,
4070 Label* on_not_heap_number) {
4071 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4072 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
4073 Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
4074}
4075
4076
4077void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
4078 Register first,
4079 Register second,
4080 Register scratch1,
4081 Register scratch2,
4082 Label* failure) {
4083 // Test that both first and second are sequential ASCII strings.
4084 // Assume that they are non-smis.
4085 lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
4086 lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
4087 lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
4088 lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
4089
4090 JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
4091 scratch2,
4092 scratch1,
4093 scratch2,
4094 failure);
4095}
4096
4097
4098void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
4099 Register second,
4100 Register scratch1,
4101 Register scratch2,
4102 Label* failure) {
4103 // Check that neither is a smi.
4104 STATIC_ASSERT(kSmiTag == 0);
4105 And(scratch1, first, Operand(second));
4106 And(scratch1, scratch1, Operand(kSmiTagMask));
4107 Branch(failure, eq, scratch1, Operand(zero_reg));
4108 JumpIfNonSmisNotBothSequentialAsciiStrings(first,
4109 second,
4110 scratch1,
4111 scratch2,
4112 failure);
4113}
4114
4115
4116void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
4117 Register first,
4118 Register second,
4119 Register scratch1,
4120 Register scratch2,
4121 Label* failure) {
4122 int kFlatAsciiStringMask =
4123 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
4124 int kFlatAsciiStringTag = ASCII_STRING_TYPE;
4125 ASSERT(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed.
4126 andi(scratch1, first, kFlatAsciiStringMask);
4127 Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
4128 andi(scratch2, second, kFlatAsciiStringMask);
4129 Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
4130}
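// Sketch of the predicate checked above (illustrative):
//   bool flat_ascii =
//       (type & (kIsNotStringMask | kStringEncodingMask |
//                kStringRepresentationMask)) == ASCII_STRING_TYPE;
// i.e. the value must be a string that is both ASCII encoded and sequential.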
4131
4132
4133void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
4134 Register scratch,
4135 Label* failure) {
4136 int kFlatAsciiStringMask =
4137 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
4138 int kFlatAsciiStringTag = ASCII_STRING_TYPE;
4139 And(scratch, type, Operand(kFlatAsciiStringMask));
4140 Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
4141}
4142
4143
4144static const int kRegisterPassedArguments = 4;
4145
4146void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
4147 int frame_alignment = ActivationFrameAlignment();
4148
Steve Block44f0eee2011-05-26 01:26:41 +01004149 // Up to four simple arguments are passed in registers a0..a3.
4150 // Those four arguments must have reserved argument slots on the stack for
4151 // mips, even though those argument slots are not normally used.
4152 // Remaining arguments are pushed on the stack, above (higher address than)
4153 // the argument slots.
4154 ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0);
4155 int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
4156 0 : num_arguments - kRegisterPassedArguments) +
4157 (StandardFrameConstants::kCArgsSlotsSize /
4158 kPointerSize);
4159 if (frame_alignment > kPointerSize) {
4160 // Make stack end at alignment and make room for num_arguments - 4 words
4161 // and the original value of sp.
4162 mov(scratch, sp);
4163 Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
4164 ASSERT(IsPowerOf2(frame_alignment));
4165 And(sp, sp, Operand(-frame_alignment));
4166 sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
4167 } else {
4168 Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
4169 }
4170}
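// Worked example (hedged, since the exact slot count comes from
// StandardFrameConstants::kCArgsSlotsSize): with four register argument
// slots, a call with six arguments reserves (6 - 4) + 4 = 6 words, plus one
// extra word for the saved sp when the frame alignment exceeds a word,
// before sp is rounded down to the required alignment.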
4171
4172
4173void MacroAssembler::CallCFunction(ExternalReference function,
4174 int num_arguments) {
Ben Murdoch257744e2011-11-30 15:57:28 +00004175 CallCFunctionHelper(no_reg, function, t8, num_arguments);
Steve Block44f0eee2011-05-26 01:26:41 +01004176}
4177
4178
4179void MacroAssembler::CallCFunction(Register function,
4180 Register scratch,
4181 int num_arguments) {
4182 CallCFunctionHelper(function,
4183 ExternalReference::the_hole_value_location(isolate()),
4184 scratch,
4185 num_arguments);
4186}
4187
4188
4189void MacroAssembler::CallCFunctionHelper(Register function,
4190 ExternalReference function_reference,
4191 Register scratch,
4192 int num_arguments) {
Steve Block44f0eee2011-05-26 01:26:41 +01004193 // Make sure that the stack is aligned before calling a C function unless
4194 // running in the simulator. The simulator has its own alignment check which
4195 // provides more information.
 4196  // The argument slots are presumed to have been set up by
4197 // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
4198
4199#if defined(V8_HOST_ARCH_MIPS)
4200 if (emit_debug_code()) {
4201 int frame_alignment = OS::ActivationFrameAlignment();
4202 int frame_alignment_mask = frame_alignment - 1;
4203 if (frame_alignment > kPointerSize) {
4204 ASSERT(IsPowerOf2(frame_alignment));
4205 Label alignment_as_expected;
4206 And(at, sp, Operand(frame_alignment_mask));
4207 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
4208 // Don't use Check here, as it will call Runtime_Abort possibly
4209 // re-entering here.
4210 stop("Unexpected alignment in CallCFunction");
4211 bind(&alignment_as_expected);
4212 }
4213 }
4214#endif // V8_HOST_ARCH_MIPS
4215
4216 // Just call directly. The function called cannot cause a GC, or
4217 // allow preemption, so the return address in the link register
4218 // stays correct.
Steve Block44f0eee2011-05-26 01:26:41 +01004219
4220 if (function.is(no_reg)) {
Ben Murdoch257744e2011-11-30 15:57:28 +00004221 function = t9;
4222 li(function, Operand(function_reference));
4223 } else if (!function.is(t9)) {
4224 mov(t9, function);
Steve Block44f0eee2011-05-26 01:26:41 +01004225 function = t9;
4226 }
4227
4228 Call(function);
4229
4230 ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0);
4231 int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
4232 0 : num_arguments - kRegisterPassedArguments) +
4233 (StandardFrameConstants::kCArgsSlotsSize /
4234 kPointerSize);
4235
4236 if (OS::ActivationFrameAlignment() > kPointerSize) {
4237 lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
4238 } else {
 4239    Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
4240 }
4241}
4242
4243
4244#undef BRANCH_ARGS_CHECK
4245
4246
Ben Murdoch257744e2011-11-30 15:57:28 +00004247void MacroAssembler::LoadInstanceDescriptors(Register map,
4248 Register descriptors) {
4249 lw(descriptors,
4250 FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
4251 Label not_smi;
4252 JumpIfNotSmi(descriptors, &not_smi);
4253 li(descriptors, Operand(FACTORY->empty_descriptor_array()));
4254 bind(&not_smi);
4255}
4256
4257
Steve Block44f0eee2011-05-26 01:26:41 +01004258CodePatcher::CodePatcher(byte* address, int instructions)
4259 : address_(address),
4260 instructions_(instructions),
4261 size_(instructions * Assembler::kInstrSize),
Ben Murdoch257744e2011-11-30 15:57:28 +00004262 masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
Steve Block44f0eee2011-05-26 01:26:41 +01004263 // Create a new macro assembler pointing to the address of the code to patch.
 4264  // The size is adjusted with kGap in order for the assembler to generate size
4265 // bytes of instructions without failing with buffer size constraints.
4266 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4267}
4268
4269
4270CodePatcher::~CodePatcher() {
4271 // Indicate that code has changed.
4272 CPU::FlushICache(address_, size_);
4273
4274 // Check that the code was patched as expected.
4275 ASSERT(masm_.pc_ == address_ + size_);
4276 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4277}
4278
4279
Ben Murdoch257744e2011-11-30 15:57:28 +00004280void CodePatcher::Emit(Instr instr) {
4281 masm()->emit(instr);
Steve Block44f0eee2011-05-26 01:26:41 +01004282}
4283
4284
4285void CodePatcher::Emit(Address addr) {
4286 masm()->emit(reinterpret_cast<Instr>(addr));
4287}
4288
4289
Ben Murdoch257744e2011-11-30 15:57:28 +00004290void CodePatcher::ChangeBranchCondition(Condition cond) {
4291 Instr instr = Assembler::instr_at(masm_.pc_);
4292 ASSERT(Assembler::IsBranch(instr));
4293 uint32_t opcode = Assembler::GetOpcodeField(instr);
4294 // Currently only the 'eq' and 'ne' cond values are supported and the simple
4295 // branch instructions (with opcode being the branch type).
4296 // There are some special cases (see Assembler::IsBranch()) so extending this
4297 // would be tricky.
4298 ASSERT(opcode == BEQ ||
4299 opcode == BNE ||
4300 opcode == BLEZ ||
4301 opcode == BGTZ ||
4302 opcode == BEQL ||
4303 opcode == BNEL ||
4304 opcode == BLEZL ||
4305 opcode == BGTZL);
4306 opcode = (cond == eq) ? BEQ : BNE;
4307 instr = (instr & ~kOpcodeMask) | opcode;
4308 masm_.emit(instr);
4309}
Steve Block44f0eee2011-05-26 01:26:41 +01004310
4311
Andrei Popescu31002712010-02-23 13:46:05 +00004312} } // namespace v8::internal
4313
Leon Clarkef7060e22010-06-03 12:02:55 +01004314#endif // V8_TARGET_ARCH_MIPS