// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <limits.h>  // For LONG_MIN, LONG_MAX.

#include "v8.h"

#if defined(V8_TARGET_ARCH_MIPS)

#include "bootstrapper.h"
#include "codegen.h"
#include "debug.h"
#include "runtime.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      allow_stub_calls_(true) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}


void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index) {
  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Condition cond,
                              Register src1, const Operand& src2) {
  Branch(2, NegateCondition(cond), src1, src2);
  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index) {
  sw(source, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index,
                               Condition cond,
                               Register src1, const Operand& src2) {
  Branch(2, NegateCondition(cond), src1, src2);
  sw(source, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::RecordWriteHelper(Register object,
                                       Register address,
                                       Register scratch) {
  if (emit_debug_code()) {
    // Check that the object is not in new space.
    Label not_in_new_space;
    InNewSpace(object, scratch, ne, &not_in_new_space);
    Abort("new-space object passed to RecordWriteHelper");
    bind(&not_in_new_space);
  }

  // Calculate page address: Clear bits from 0 to kPageSizeBits.
  if (mips32r2) {
    Ins(object, zero_reg, 0, kPageSizeBits);
  } else {
    // The Ins macro is slow on r1, so use shifts instead.
    srl(object, object, kPageSizeBits);
    sll(object, object, kPageSizeBits);
  }

  // Calculate region number.
  Ext(address, address, Page::kRegionSizeLog2,
      kPageSizeBits - Page::kRegionSizeLog2);

  // Mark region dirty.
  lw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
  li(at, Operand(1));
  sllv(at, at, address);
  or_(scratch, scratch, at);
  sw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
}


// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of kNumSafepointRegisters values on the
  // stack, so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  ASSERT(num_unsaved >= 0);
  Subu(sp, sp, Operand(num_unsaved * kPointerSize));
  MultiPush(kSafepointSavedRegisters);
}


void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  MultiPop(kSafepointSavedRegisters);
  Addu(sp, sp, Operand(num_unsaved * kPointerSize));
}


void MacroAssembler::PushSafepointRegistersAndDoubles() {
  PushSafepointRegisters();
  Subu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i += 2) {
    FPURegister reg = FPURegister::FromAllocationIndex(i);
    sdc1(reg, MemOperand(sp, i * kDoubleSize));
  }
}


void MacroAssembler::PopSafepointRegistersAndDoubles() {
  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i += 2) {
    FPURegister reg = FPURegister::FromAllocationIndex(i);
    ldc1(reg, MemOperand(sp, i * kDoubleSize));
  }
  Addu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
  PopSafepointRegisters();
}


void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
                                                             Register dst) {
  sw(src, SafepointRegistersAndDoublesSlot(dst));
}


void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  sw(src, SafepointRegisterSlot(dst));
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  lw(dst, SafepointRegisterSlot(src));
}


int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.
  return kSafepointRegisterStackIndexMap[reg_code];
}


MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}


MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
  // General purpose registers are pushed last on the stack.
  int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
  return MemOperand(sp, doubles_size + register_offset);
}
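
// Illustrative layout sketch (editorial note, not from the original source):
// after PushSafepointRegistersAndDoubles() the FPU registers sit closest to
// sp, with the general-purpose safepoint registers above them, which is why
// a GP slot is located at doubles_size + register_offset:
//
//   sp -> saved FPU registers          (doubles_size bytes)
//         saved GP registers           (kNumSafepointSavedRegisters slots)
//         padding for unsaved regs     (kNumSafepointRegisters - saved)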




void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cc,
                                Label* branch) {
  ASSERT(cc == eq || cc == ne);
  And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
  Branch(branch, cc, scratch,
         Operand(ExternalReference::new_space_start(isolate())));
}


// Will clobber 4 registers: object, scratch0, scratch1, at. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
                                 Operand offset,
                                 Register scratch0,
                                 Register scratch1) {
  // The compiled code assumes that record write doesn't change the
  // context register, so we check that none of the clobbered
  // registers are cp.
  ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));

  Label done;

  // First, test that the object is not in the new space. We cannot set
  // region marks for new space pages.
  InNewSpace(object, scratch0, eq, &done);

  // Add offset into the object.
  Addu(scratch0, object, offset);

  // Record the actual write.
  RecordWriteHelper(object, scratch0, scratch1);

  bind(&done);

  // Clobber all input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(object, Operand(BitCast<int32_t>(kZapValue)));
    li(scratch0, Operand(BitCast<int32_t>(kZapValue)));
    li(scratch1, Operand(BitCast<int32_t>(kZapValue)));
  }
}


// Will clobber 4 registers: object, address, scratch, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
                                 Register address,
                                 Register scratch) {
  // The compiled code assumes that record write doesn't change the
  // context register, so we check that none of the clobbered
  // registers are cp.
  ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));

  Label done;

  // First, test that the object is not in the new space. We cannot set
  // region marks for new space pages.
  InNewSpace(object, scratch, eq, &done);

  // Record the actual write.
  RecordWriteHelper(object, address, scratch);

  bind(&done);

  // Clobber all input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(object, Operand(BitCast<int32_t>(kZapValue)));
    li(address, Operand(BitCast<int32_t>(kZapValue)));
    li(scratch, Operand(BitCast<int32_t>(kZapValue)));
  }
}


// -----------------------------------------------------------------------------
// Allocation support.


void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch,
                                            Label* miss) {
  Label same_contexts;

  ASSERT(!holder_reg.is(scratch));
  ASSERT(!holder_reg.is(at));
  ASSERT(!scratch.is(at));

  // Load current lexical context from the stack frame.
  lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
  // In debug mode, make sure the lexical context is set.
#ifdef DEBUG
  Check(ne, "we should not have an empty lexical context",
        scratch, Operand(zero_reg));
#endif

  // Load the global context of the current context.
  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
  lw(scratch, FieldMemOperand(scratch, offset));
  lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));

  // Check the context is a global context.
  if (emit_debug_code()) {
    // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
    push(holder_reg);  // Temporarily save holder on the stack.
    // Read the first word and compare to the global_context_map.
    lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
    LoadRoot(at, Heap::kGlobalContextMapRootIndex);
    Check(eq, "JSGlobalObject::global_context should be a global context.",
          holder_reg, Operand(at));
    pop(holder_reg);  // Restore holder.
  }

  // Check if both contexts are the same.
  lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
  Branch(&same_contexts, eq, scratch, Operand(at));

  // Check the context is a global context.
  if (emit_debug_code()) {
    // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
    push(holder_reg);  // Temporarily save holder on the stack.
    mov(holder_reg, at);  // Move at to its holding place.
    LoadRoot(at, Heap::kNullValueRootIndex);
    Check(ne, "JSGlobalProxy::context() should not be null.",
          holder_reg, Operand(at));

    lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
    LoadRoot(at, Heap::kGlobalContextMapRootIndex);
    Check(eq, "JSGlobalObject::global_context should be a global context.",
          holder_reg, Operand(at));
    // Restore at is not needed. at is reloaded below.
    pop(holder_reg);  // Restore holder.
    // Restore at to holder's context.
    lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
  }

  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.
  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;

  lw(scratch, FieldMemOperand(scratch, token_offset));
  lw(at, FieldMemOperand(at, token_offset));
  Branch(miss, ne, scratch, Operand(at));

  bind(&same_contexts);
}


void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                              Register elements,
                                              Register key,
                                              Register result,
                                              Register reg0,
                                              Register reg1,
                                              Register reg2) {
  // Register use:
  //
  // elements - holds the slow-case elements of the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'key' or 'result'.
  //            Unchanged on bailout so 'key' or 'result' can be used
  //            in further computation.
  //
  // Scratch registers:
  //
  // reg0 - holds the untagged key on entry and holds the hash once computed.
  //
  // reg1 - Used to hold the capacity mask of the dictionary.
  //
  // reg2 - Used for the index into the dictionary.
  // at   - Temporary (avoid MacroAssembler instructions also using 'at').
  Label done;

  // Compute the hash code from the untagged key. This must be kept in sync
  // with ComputeIntegerHash in utils.h.
  //
  // hash = ~hash + (hash << 15);
  nor(reg1, reg0, zero_reg);
  sll(at, reg0, 15);
  addu(reg0, reg1, at);

  // hash = hash ^ (hash >> 12);
  srl(at, reg0, 12);
  xor_(reg0, reg0, at);

  // hash = hash + (hash << 2);
  sll(at, reg0, 2);
  addu(reg0, reg0, at);

  // hash = hash ^ (hash >> 4);
  srl(at, reg0, 4);
  xor_(reg0, reg0, at);

  // hash = hash * 2057;
  li(reg1, Operand(2057));
  mul(reg0, reg0, reg1);

  // hash = hash ^ (hash >> 16);
  srl(at, reg0, 16);
  xor_(reg0, reg0, at);

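  // For reference, a C-level sketch of the hash sequence above (editorial
  // illustration; the authoritative version is ComputeIntegerHash in utils.h):
  //
  //   uint32_t hash = key;
  //   hash = ~hash + (hash << 15);
  //   hash = hash ^ (hash >> 12);
  //   hash = hash + (hash << 2);
  //   hash = hash ^ (hash >> 4);
  //   hash = hash * 2057;
  //   hash = hash ^ (hash >> 16);
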
  // Compute the capacity mask.
  lw(reg1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
  sra(reg1, reg1, kSmiTagSize);
  Subu(reg1, reg1, Operand(1));

  // Generate an unrolled loop that performs a few probes before giving up.
  static const int kProbes = 4;
  for (int i = 0; i < kProbes; i++) {
    // Use reg2 for index calculations and keep the hash intact in reg0.
    mov(reg2, reg0);
    // Compute the masked index: (hash + i + i * i) & mask.
    if (i > 0) {
      Addu(reg2, reg2, Operand(NumberDictionary::GetProbeOffset(i)));
    }
    and_(reg2, reg2, reg1);

    // Scale the index by multiplying by the element size.
    ASSERT(NumberDictionary::kEntrySize == 3);
    sll(at, reg2, 1);  // 2x.
    addu(reg2, reg2, at);  // reg2 = reg2 * 3.

    // Check if the key is identical to the name.
    sll(at, reg2, kPointerSizeLog2);
    addu(reg2, elements, at);

    lw(at, FieldMemOperand(reg2, NumberDictionary::kElementsStartOffset));
    if (i != kProbes - 1) {
      Branch(&done, eq, key, Operand(at));
    } else {
      Branch(miss, ne, key, Operand(at));
    }
  }

  bind(&done);
  // Check that the value is a normal property.
  // reg2: elements + (index * kPointerSize).
  const int kDetailsOffset =
      NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
  And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
  Branch(miss, ne, at, Operand(zero_reg));

  // Get the value at the masked, scaled index and return.
  const int kValueOffset =
      NumberDictionary::kElementsStartOffset + kPointerSize;
  lw(result, FieldMemOperand(reg2, kValueOffset));
}


// ---------------------------------------------------------------------------
// Instruction macros.

void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    addu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      addiu(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      addu(rd, rs, at);
    }
  }
}


void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    subu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      addiu(rd, rs, -rt.imm32_);  // No subiu instr, use addiu(x, y, -imm).
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      subu(rd, rs, at);
    }
  }
}


void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    mul(rd, rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    mul(rd, rs, at);
  }
}


void MacroAssembler::Mult(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    mult(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    mult(rs, at);
  }
}


void MacroAssembler::Multu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    multu(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    multu(rs, at);
  }
}


void MacroAssembler::Div(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    div(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    div(rs, at);
  }
}


void MacroAssembler::Divu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    divu(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    divu(rs, at);
  }
}


void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    and_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      andi(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      and_(rd, rs, at);
    }
  }
}


void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    or_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      ori(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      or_(rd, rs, at);
    }
  }
}


void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    xor_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      xori(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      xor_(rd, rs, at);
    }
  }
}


void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    nor(rd, rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    nor(rd, rs, at);
  }
}


void MacroAssembler::Neg(Register rs, const Operand& rt) {
  ASSERT(rt.is_reg());
  ASSERT(!at.is(rs));
  ASSERT(!at.is(rt.rm()));
  li(at, -1);
  xor_(rs, rt.rm(), at);
}


void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    slt(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      slti(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      slt(rd, rs, at);
    }
  }
}


void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    sltu(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      sltiu(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      sltu(rd, rs, at);
    }
  }
}


void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
  if (mips32r2) {
    if (rt.is_reg()) {
      rotrv(rd, rs, rt.rm());
    } else {
      rotr(rd, rs, rt.imm32_);
    }
  } else {
    if (rt.is_reg()) {
      subu(at, zero_reg, rt.rm());
      sllv(at, rs, at);
      srlv(rd, rs, rt.rm());
      or_(rd, rd, at);
    } else {
      if (rt.imm32_ == 0) {
        srl(rd, rs, 0);
      } else {
        srl(at, rs, rt.imm32_);
        sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
        or_(rd, rd, at);
      }
    }
  }
}
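
// Illustrative note (editorial, not from the original source): on pre-R2
// cores the rotate is synthesized from shifts using
//   ror(x, n) == (x >> n) | (x << (32 - n)).
// In the register case the left-shift amount is computed as 0 - n; since
// sllv only uses the low five bits of its shift amount, that value equals
// 32 - n for any n in 1..31.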


//------------Pseudo-instructions-------------

void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
  ASSERT(!j.is_reg());
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (!MustUseReg(j.rmode_) && !gen2instr) {
    // Normal load of an immediate value which does not need Relocation Info.
    if (is_int16(j.imm32_)) {
      addiu(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & kHiMask)) {
      ori(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & kImm16Mask)) {
      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
    } else {
      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
      ori(rd, rd, (j.imm32_ & kImm16Mask));
    }
  } else if (MustUseReg(j.rmode_) || gen2instr) {
    if (MustUseReg(j.rmode_)) {
      RecordRelocInfo(j.rmode_, j.imm32_);
    }
    // We always need the same number of instructions here, because this code
    // may be patched later to load another value, which can take two
    // instructions to load.
    lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
    ori(rd, rd, (j.imm32_ & kImm16Mask));
  }
}
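
// Quick illustration (editorial note): with the usual kLuiShift == 16 and
// kHiMask/kImm16Mask selecting the high and low half-words, the
// two-instruction form of li(rd, 0x12345678) emits
//   lui(rd, 0x1234);       // rd = 0x12340000
//   ori(rd, rd, 0x5678);   // rd = 0x12345678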


void MacroAssembler::MultiPush(RegList regs) {
  int16_t NumSaved = 0;
  int16_t NumToPush = NumberOfBitsSet(regs);

  addiu(sp, sp, -4 * NumToPush);
  for (int16_t i = kNumRegisters; i > 0; i--) {
    if ((regs & (1 << i)) != 0) {
      sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
    }
  }
}


void MacroAssembler::MultiPushReversed(RegList regs) {
  int16_t NumSaved = 0;
  int16_t NumToPush = NumberOfBitsSet(regs);

  addiu(sp, sp, -4 * NumToPush);
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
    }
  }
}


void MacroAssembler::MultiPop(RegList regs) {
  int16_t NumSaved = 0;

  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
    }
  }
  addiu(sp, sp, 4 * NumSaved);
}


void MacroAssembler::MultiPopReversed(RegList regs) {
  int16_t NumSaved = 0;

  for (int16_t i = kNumRegisters; i > 0; i--) {
    if ((regs & (1 << i)) != 0) {
      lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
    }
  }
  addiu(sp, sp, 4 * NumSaved);
}


void MacroAssembler::Ext(Register rt,
                         Register rs,
                         uint16_t pos,
                         uint16_t size) {
  ASSERT(pos < 32);
  ASSERT(pos + size < 32);

  if (mips32r2) {
    ext_(rt, rs, pos, size);
  } else {
    // Move rs to rt and shift it left then right to get the
    // desired bitfield on the right side and zeroes on the left.
    sll(rt, rs, 32 - (pos + size));
    srl(rt, rt, 32 - size);
  }
}


void MacroAssembler::Ins(Register rt,
                         Register rs,
                         uint16_t pos,
                         uint16_t size) {
  ASSERT(pos < 32);
  ASSERT(pos + size < 32);

  if (mips32r2) {
    ins_(rt, rs, pos, size);
  } else {
    ASSERT(!rt.is(t8) && !rs.is(t8));

    srl(t8, rt, pos + size);
    // The left chunk from rt that needs to
    // be saved is on the right side of t8.
    sll(at, t8, pos + size);
    // The 'at' register now contains the left chunk on
    // the left (proper position) and zeroes.
    sll(t8, rt, 32 - pos);
    // t8 now contains the right chunk on the left and zeroes.
    srl(t8, t8, 32 - pos);
    // t8 now contains the right chunk on
    // the right (proper position) and zeroes.
    or_(rt, at, t8);
    // rt now contains the left and right chunks from the original rt
    // in their proper position and zeroes in the middle.
    sll(t8, rs, 32 - size);
    // t8 now contains the chunk from rs on the left and zeroes.
    srl(t8, t8, 32 - size - pos);
    // t8 now contains the original chunk from rs in
    // the middle (proper position).
    or_(rt, rt, t8);
    // rt now contains the result of the ins instruction in R2 mode.
  }
}
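
// Worked example (editorial illustration): Ins(rt, rs, 8, 4) replaces bits
// 11..8 of rt with the low 4 bits of rs and leaves every other bit of rt
// unchanged, i.e. rt = (rt & ~(0xF << 8)) | ((rs & 0xF) << 8).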


void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
  // Move the data from fs to t4.
  mfc1(t4, fs);
  return Cvt_d_uw(fd, t4);
}


void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
  // Convert rs to a FP value in fd (and fd + 1).
  // We do this by converting rs minus the MSB to avoid sign conversion,
  // then adding 2^31-1 and 1 to the result.

  ASSERT(!fd.is(f20));
  ASSERT(!rs.is(t9));
  ASSERT(!rs.is(t8));

  // Save rs's MSB to t8.
  And(t8, rs, 0x80000000);
  // Remove rs's MSB.
  And(t9, rs, 0x7FFFFFFF);
  // Move t9 to fd.
  mtc1(t9, fd);

  // Convert fd to a real FP value.
  cvt_d_w(fd, fd);

  Label conversion_done;

  // If rs's MSB was 0, it's done.
  // Otherwise we need to add that to the FP register.
  Branch(&conversion_done, eq, t8, Operand(zero_reg));

  // First load 2^31 - 1 into f20.
  Or(t9, zero_reg, 0x7FFFFFFF);
  mtc1(t9, f20);

  // Convert it to FP and add it to fd.
  cvt_d_w(f20, f20);
  add_d(fd, fd, f20);
  // Now add 1.
  Or(t9, zero_reg, 1);
  mtc1(t9, f20);

  cvt_d_w(f20, f20);
  add_d(fd, fd, f20);
  bind(&conversion_done);
}
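
// Worked example (editorial illustration): for rs == 0xFFFFFFFF the MSB is
// set, so the code converts 0x7FFFFFFF (2147483647.0) and then adds
// 2^31 - 1 followed by 1 (2147483648 in total), giving 4294967295.0 -- the
// unsigned value -- without ever feeding a negative 32-bit pattern to
// cvt_d_w.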


void MacroAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs) {
  Trunc_uw_d(fs, t4);
  mtc1(t4, fd);
}


void MacroAssembler::Trunc_uw_d(FPURegister fd, Register rs) {
  ASSERT(!fd.is(f22));
  ASSERT(!rs.is(t8));

  // Load 2^31 into f22.
  Or(t8, zero_reg, 0x80000000);
  Cvt_d_uw(f22, t8);

  // Test if f22 > fd.
  c(OLT, D, fd, f22);

  Label simple_convert;
  // If fd < 2^31 we can convert it normally.
  bc1t(&simple_convert);

  // First we subtract 2^31 from fd, then trunc it to rs
  // and add 2^31 to rs.

  sub_d(f22, fd, f22);
  trunc_w_d(f22, f22);
  mfc1(rs, f22);
  or_(rs, rs, t8);

  Label done;
  Branch(&done);
  // Simple conversion.
  bind(&simple_convert);
  trunc_w_d(f22, fd);
  mfc1(rs, f22);

  bind(&done);
}
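
// Worked example (editorial illustration): for fd == 3000000000.0 (>= 2^31)
// the code truncates 3000000000 - 2147483648 == 852516352 and then ORs the
// 2^31 bit back in, yielding the unsigned result 3000000000 in rs.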


// Tries to get a signed int32 out of a double precision floating point heap
// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
// 32-bit signed integer range.
// This method implementation differs from the ARM version for performance
// reasons.
void MacroAssembler::ConvertToInt32(Register source,
                                    Register dest,
                                    Register scratch,
                                    Register scratch2,
                                    FPURegister double_scratch,
                                    Label *not_int32) {
  Label right_exponent, done;
  // Get exponent word (ENDIAN issues).
  lw(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
  // Get exponent alone in scratch2.
  And(scratch2, scratch, Operand(HeapNumber::kExponentMask));
  // Load dest with zero. We use this either for the final shift or
  // for the answer.
  mov(dest, zero_reg);
  // Check whether the exponent matches a 32 bit signed int that is not a Smi.
  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
  // the exponent that we are fastest at and also the highest exponent we can
  // handle here.
  const uint32_t non_smi_exponent =
      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
  // If we have a match of the int32-but-not-Smi exponent then skip some logic.
  Branch(&right_exponent, eq, scratch2, Operand(non_smi_exponent));
  // If the exponent is higher than that then go to not_int32 case. This
  // catches numbers that don't fit in a signed int32, infinities and NaNs.
  Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));

  // We know the exponent is smaller than 30 (biased). If it is less than
  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
  // it rounds to zero.
  const uint32_t zero_exponent =
      (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
  Subu(scratch2, scratch2, Operand(zero_exponent));
  // Dest already has a Smi zero.
  Branch(&done, lt, scratch2, Operand(zero_reg));
  if (!CpuFeatures::IsSupported(FPU)) {
    // We have a shifted exponent between 0 and 30 in scratch2.
    srl(dest, scratch2, HeapNumber::kExponentShift);
    // We now have the exponent in dest. Subtract from 30 to get
    // how much to shift down.
    li(at, Operand(30));
    subu(dest, at, dest);
  }
  bind(&right_exponent);
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // MIPS FPU instructions implementing double precision to integer
    // conversion using round to zero. Since the FP value was qualified
    // above, the resulting integer should be a legal int32.
    // The original 'Exponent' word is still in scratch.
    lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
    mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
    trunc_w_d(double_scratch, double_scratch);
    mfc1(dest, double_scratch);
  } else {
    // On entry, dest has final downshift, scratch has original sign/exp/mant.
    // Save sign bit in top bit of dest.
    And(scratch2, scratch, Operand(0x80000000));
    Or(dest, dest, Operand(scratch2));
    // Put back the implicit 1, just above mantissa field.
    Or(scratch, scratch, Operand(1 << HeapNumber::kExponentShift));

    // Shift up the mantissa bits to take up the space the exponent used to
    // take. We just orred in the implicit bit so that took care of one and
    // we want to leave the sign bit 0 so we subtract 2 bits from the shift
    // distance. But we want to clear the sign-bit so shift one more bit
    // left, then shift right one bit.
    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
    sll(scratch, scratch, shift_distance + 1);
    srl(scratch, scratch, 1);

    // Get the second half of the double. For some exponents we don't
    // actually need this because the bits get shifted out again, but
    // it's probably slower to test than just to do it.
    lw(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
    // Extract the top 10 bits, and insert them as the bottom 10 bits of scratch.
    // The width of the field here is the same as the shift amount above.
    const int field_width = shift_distance;
    Ext(scratch2, scratch2, 32-shift_distance, field_width);
    Ins(scratch, scratch2, 0, field_width);
    // Move down according to the exponent.
    srlv(scratch, scratch, dest);
    // Prepare the negative version of our integer.
    subu(scratch2, zero_reg, scratch);
    // Trick to check sign bit (msb) held in dest, count leading zero.
    // 0 indicates negative, save negative version with conditional move.
    clz(dest, dest);
    movz(scratch, scratch2, dest);
    mov(dest, scratch);
  }
  bind(&done);
}


void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
                                                 Register input_high,
                                                 Register input_low,
                                                 Register scratch) {
  Label done, normal_exponent, restore_sign;
  // Extract the biased exponent in result.
  Ext(result,
      input_high,
      HeapNumber::kExponentShift,
      HeapNumber::kExponentBits);

  // Check for Infinity and NaNs, which should return 0.
  Subu(scratch, result, HeapNumber::kExponentMask);
  movz(result, zero_reg, scratch);
  Branch(&done, eq, scratch, Operand(zero_reg));

  // Express exponent as delta to (number of mantissa bits + 31).
  Subu(result,
       result,
       Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));

  // If the delta is strictly positive, all bits would be shifted away,
  // which means that we can return 0.
  Branch(&normal_exponent, le, result, Operand(zero_reg));
  mov(result, zero_reg);
  Branch(&done);

  bind(&normal_exponent);
  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
  // Calculate shift.
  Addu(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits));

  // Save the sign.
  Register sign = result;
  result = no_reg;
  And(sign, input_high, Operand(HeapNumber::kSignMask));

  // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
  // to check for this specific case.
  Label high_shift_needed, high_shift_done;
  Branch(&high_shift_needed, lt, scratch, Operand(32));
  mov(input_high, zero_reg);
  Branch(&high_shift_done);
  bind(&high_shift_needed);

  // Set the implicit 1 before the mantissa part in input_high.
  Or(input_high,
     input_high,
     Operand(1 << HeapNumber::kMantissaBitsInTopWord));
  // Shift the mantissa bits to the correct position.
  // We don't need to clear non-mantissa bits as they will be shifted away.
  // If they weren't, it would mean that the answer is in the 32bit range.
  sllv(input_high, input_high, scratch);

  bind(&high_shift_done);

  // Replace the shifted bits with bits from the lower mantissa word.
  Label pos_shift, shift_done;
  li(at, 32);
  subu(scratch, at, scratch);
  Branch(&pos_shift, ge, scratch, Operand(zero_reg));

  // Negate scratch.
  Subu(scratch, zero_reg, scratch);
  sllv(input_low, input_low, scratch);
  Branch(&shift_done);

  bind(&pos_shift);
  srlv(input_low, input_low, scratch);

  bind(&shift_done);
  Or(input_high, input_high, Operand(input_low));
  // Restore sign if necessary.
  mov(scratch, sign);
  result = sign;
  sign = no_reg;
  Subu(result, zero_reg, input_high);
  movz(result, input_high, scratch);
  bind(&done);
}


void MacroAssembler::EmitECMATruncate(Register result,
                                      FPURegister double_input,
                                      FPURegister single_scratch,
                                      Register scratch,
                                      Register input_high,
                                      Register input_low) {
  CpuFeatures::Scope scope(FPU);
  ASSERT(!input_high.is(result));
  ASSERT(!input_low.is(result));
  ASSERT(!input_low.is(input_high));
  ASSERT(!scratch.is(result) &&
         !scratch.is(input_high) &&
         !scratch.is(input_low));
  ASSERT(!single_scratch.is(double_input));

  Label done;
  Label manual;

  // Clear cumulative exception flags and save the FCSR.
  Register scratch2 = input_high;
  cfc1(scratch2, FCSR);
  ctc1(zero_reg, FCSR);
  // Try a conversion to a signed integer.
  trunc_w_d(single_scratch, double_input);
  mfc1(result, single_scratch);
  // Retrieve and restore the FCSR.
  cfc1(scratch, FCSR);
  ctc1(scratch2, FCSR);
  // Check for overflow and NaNs.
  And(scratch,
      scratch,
      kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
  // If we had no exceptions we are done.
  Branch(&done, eq, scratch, Operand(zero_reg));

  // Load the double value and perform a manual truncation.
  Move(input_low, input_high, double_input);
  EmitOutOfInt32RangeTruncate(result,
                              input_high,
                              input_low,
                              scratch);
  bind(&done);
}


void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                         Register src,
                                         int num_least_bits) {
  Ext(dst, src, kSmiTagSize, num_least_bits);
}


void MacroAssembler::GetLeastBitsFromInt32(Register dst,
                                           Register src,
                                           int num_least_bits) {
  And(dst, src, Operand((1 << num_least_bits) - 1));
}


// Emulated conditional branches do not emit a nop in the branch delay slot.
//
// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT(                                \
    (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) ||          \
    (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))


bool MacroAssembler::UseAbsoluteCodePointers() {
  if (is_trampoline_emitted()) {
    return true;
  } else {
    return false;
  }
}


void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
  BranchShort(offset, bdslot);
}


void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
                            const Operand& rt,
                            BranchDelaySlot bdslot) {
  BranchShort(offset, cond, rs, rt, bdslot);
}


void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
  bool is_label_near = is_near(L);
  if (UseAbsoluteCodePointers() && !is_label_near) {
    Jr(L, bdslot);
  } else {
    BranchShort(L, bdslot);
  }
}


void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
                            const Operand& rt,
                            BranchDelaySlot bdslot) {
  bool is_label_near = is_near(L);
  if (UseAbsoluteCodePointers() && !is_label_near) {
    Label skip;
    Condition neg_cond = NegateCondition(cond);
    BranchShort(&skip, neg_cond, rs, rt);
    Jr(L, bdslot);
    bind(&skip);
  } else {
    BranchShort(L, cond, rs, rt, bdslot);
  }
}


void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
  b(offset);

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}


void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
                                 const Operand& rt,
                                 BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);
  ASSERT(!rs.is(zero_reg));
  Register r2 = no_reg;
  Register scratch = at;

  if (rt.is_reg()) {
    // We don't want any other register but scratch clobbered.
    ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
    r2 = rt.rm_;
    switch (cond) {
      case cc_always:
        b(offset);
        break;
      case eq:
        beq(rs, r2, offset);
        break;
      case ne:
        bne(rs, r2, offset);
        break;
      // Signed comparison.
      case greater:
        if (r2.is(zero_reg)) {
          bgtz(rs, offset);
        } else {
          slt(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (r2.is(zero_reg)) {
          bgez(rs, offset);
        } else {
          slt(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (r2.is(zero_reg)) {
          bltz(rs, offset);
        } else {
          slt(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (r2.is(zero_reg)) {
          blez(rs, offset);
        } else {
          slt(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (r2.is(zero_reg)) {
          bgtz(rs, offset);
        } else {
          sltu(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (r2.is(zero_reg)) {
          bgez(rs, offset);
        } else {
          sltu(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (r2.is(zero_reg)) {
          // No code needs to be emitted.
          return;
        } else {
          sltu(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (r2.is(zero_reg)) {
          b(offset);
        } else {
          sltu(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  } else {
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching the
    // target.
    switch (cond) {
      case cc_always:
        b(offset);
        break;
      case eq:
        // We don't want any other register but scratch clobbered.
        ASSERT(!scratch.is(rs));
        r2 = scratch;
        li(r2, rt);
        beq(rs, r2, offset);
        break;
      case ne:
        // We don't want any other register but scratch clobbered.
        ASSERT(!scratch.is(rs));
        r2 = scratch;
        li(r2, rt);
        bne(rs, r2, offset);
        break;
      // Signed comparison.
      case greater:
        if (rt.imm32_ == 0) {
          bgtz(rs, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (rt.imm32_ == 0) {
          bgez(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          beq(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (rt.imm32_ == 0) {
          bltz(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          bne(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (rt.imm32_ == 0) {
          blez(rs, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (rt.imm32_ == 0) {
          bgtz(rs, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (rt.imm32_ == 0) {
          bgez(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          beq(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (rt.imm32_ == 0) {
          // No code needs to be emitted.
          return;
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          bne(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (rt.imm32_ == 0) {
          b(offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  }
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}


void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
  // We use branch_offset as an argument for the branch instructions to be sure
  // it is called just before generating the branch instruction, as needed.

  b(shifted_branch_offset(L, false));

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}


void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
                                 const Operand& rt,
                                 BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);

  int32_t offset;
  Register r2 = no_reg;
  Register scratch = at;
  if (rt.is_reg()) {
    r2 = rt.rm_;
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching the
    // target.
    switch (cond) {
      case cc_always:
        offset = shifted_branch_offset(L, false);
        b(offset);
        break;
      case eq:
        offset = shifted_branch_offset(L, false);
        beq(rs, r2, offset);
        break;
      case ne:
        offset = shifted_branch_offset(L, false);
        bne(rs, r2, offset);
        break;
      // Signed comparison.
      case greater:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bgtz(rs, offset);
        } else {
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bgez(rs, offset);
        } else {
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bltz(rs, offset);
        } else {
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          blez(rs, offset);
        } else {
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bgtz(rs, offset);
        } else {
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bgez(rs, offset);
        } else {
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (r2.is(zero_reg)) {
          // No code needs to be emitted.
          return;
        } else {
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          b(offset);
        } else {
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  } else {
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching the
    // target.
    switch (cond) {
      case cc_always:
        offset = shifted_branch_offset(L, false);
        b(offset);
        break;
      case eq:
        r2 = scratch;
        li(r2, rt);
        offset = shifted_branch_offset(L, false);
        beq(rs, r2, offset);
        break;
      case ne:
        r2 = scratch;
        li(r2, rt);
        offset = shifted_branch_offset(L, false);
        bne(rs, r2, offset);
        break;
      // Signed comparison.
      case greater:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bgtz(rs, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bgez(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bltz(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          blez(rs, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bgtz(rs, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bgez(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (rt.imm32_ == 0) {
          // No code needs to be emitted.
          return;
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          b(offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  }
1683  // Check that the offset actually fits in an int16_t.
1684 ASSERT(is_int16(offset));
1685 // Emit a nop in the branch delay slot if required.
1686 if (bdslot == PROTECT)
1687 nop();
1688}
1689
1690
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001691void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
1692 BranchAndLinkShort(offset, bdslot);
1693}
1694
1695
1696void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
1697 const Operand& rt,
1698 BranchDelaySlot bdslot) {
1699 BranchAndLinkShort(offset, cond, rs, rt, bdslot);
1700}
1701
1702
1703void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
1704 bool is_label_near = is_near(L);
1705 if (UseAbsoluteCodePointers() && !is_label_near) {
1706 Jalr(L, bdslot);
1707 } else {
1708 BranchAndLinkShort(L, bdslot);
1709 }
1710}
1711
1712
1713void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
1714 const Operand& rt,
1715 BranchDelaySlot bdslot) {
1716 bool is_label_near = is_near(L);
1717 if (UseAbsoluteCodePointers() && !is_label_near) {
1718 Label skip;
1719 Condition neg_cond = NegateCondition(cond);
1720 BranchShort(&skip, neg_cond, rs, rt);
1721 Jalr(L, bdslot);
1722 bind(&skip);
1723 } else {
1724 BranchAndLinkShort(L, cond, rs, rt, bdslot);
1725 }
1726}
1727
1728
Andrei Popescu31002712010-02-23 13:46:05 +00001729// We need to use a bgezal or bltzal, but they can't be used directly with the
1730// slt instructions. We could use sub or add instead but we would miss overflow
1731// cases, so we keep slt and add an intermediate third instruction.
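// To illustrate, the sequence emitted below for, e.g., the 'greater' condition
// is:
//   slt(scratch, r2, rs);         // scratch = (r2 < rs) ? 1 : 0.
//   addiu(scratch, scratch, -1);  // scratch = (r2 < rs) ? 0 : -1.
//   bgezal(scratch, offset);      // Link and branch when the condition held.
// bltzal() handles the complementary conditions, branching when slt yields 0.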
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001732void MacroAssembler::BranchAndLinkShort(int16_t offset,
1733 BranchDelaySlot bdslot) {
Steve Block44f0eee2011-05-26 01:26:41 +01001734 bal(offset);
Andrei Popescu31002712010-02-23 13:46:05 +00001735
Steve Block44f0eee2011-05-26 01:26:41 +01001736 // Emit a nop in the branch delay slot if required.
1737 if (bdslot == PROTECT)
1738 nop();
Andrei Popescu31002712010-02-23 13:46:05 +00001739}
1740
1741
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001742void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
1743 Register rs, const Operand& rt,
1744 BranchDelaySlot bdslot) {
Steve Block44f0eee2011-05-26 01:26:41 +01001745 BRANCH_ARGS_CHECK(cond, rs, rt);
Steve Block6ded16b2010-05-10 14:33:55 +01001746 Register r2 = no_reg;
Steve Block44f0eee2011-05-26 01:26:41 +01001747 Register scratch = at;
1748
Andrei Popescu31002712010-02-23 13:46:05 +00001749 if (rt.is_reg()) {
1750 r2 = rt.rm_;
1751 } else if (cond != cc_always) {
1752 r2 = scratch;
1753 li(r2, rt);
1754 }
1755
1756 switch (cond) {
1757 case cc_always:
Steve Block44f0eee2011-05-26 01:26:41 +01001758 bal(offset);
Andrei Popescu31002712010-02-23 13:46:05 +00001759 break;
1760 case eq:
1761 bne(rs, r2, 2);
1762 nop();
Steve Block44f0eee2011-05-26 01:26:41 +01001763 bal(offset);
Andrei Popescu31002712010-02-23 13:46:05 +00001764 break;
1765 case ne:
1766 beq(rs, r2, 2);
1767 nop();
Steve Block44f0eee2011-05-26 01:26:41 +01001768 bal(offset);
Andrei Popescu31002712010-02-23 13:46:05 +00001769 break;
1770
Ben Murdoch257744e2011-11-30 15:57:28 +00001771 // Signed comparison.
Andrei Popescu31002712010-02-23 13:46:05 +00001772 case greater:
1773 slt(scratch, r2, rs);
1774 addiu(scratch, scratch, -1);
Steve Block44f0eee2011-05-26 01:26:41 +01001775 bgezal(scratch, offset);
Andrei Popescu31002712010-02-23 13:46:05 +00001776 break;
1777 case greater_equal:
1778 slt(scratch, rs, r2);
1779 addiu(scratch, scratch, -1);
Steve Block44f0eee2011-05-26 01:26:41 +01001780 bltzal(scratch, offset);
Andrei Popescu31002712010-02-23 13:46:05 +00001781 break;
1782 case less:
1783 slt(scratch, rs, r2);
1784 addiu(scratch, scratch, -1);
Steve Block44f0eee2011-05-26 01:26:41 +01001785 bgezal(scratch, offset);
Andrei Popescu31002712010-02-23 13:46:05 +00001786 break;
1787 case less_equal:
1788 slt(scratch, r2, rs);
1789 addiu(scratch, scratch, -1);
Steve Block44f0eee2011-05-26 01:26:41 +01001790 bltzal(scratch, offset);
Andrei Popescu31002712010-02-23 13:46:05 +00001791 break;
1792
1793 // Unsigned comparison.
1794 case Ugreater:
1795 sltu(scratch, r2, rs);
1796 addiu(scratch, scratch, -1);
Steve Block44f0eee2011-05-26 01:26:41 +01001797 bgezal(scratch, offset);
Andrei Popescu31002712010-02-23 13:46:05 +00001798 break;
1799 case Ugreater_equal:
1800 sltu(scratch, rs, r2);
1801 addiu(scratch, scratch, -1);
Steve Block44f0eee2011-05-26 01:26:41 +01001802 bltzal(scratch, offset);
Andrei Popescu31002712010-02-23 13:46:05 +00001803 break;
1804 case Uless:
1805 sltu(scratch, rs, r2);
1806 addiu(scratch, scratch, -1);
Steve Block44f0eee2011-05-26 01:26:41 +01001807 bgezal(scratch, offset);
Andrei Popescu31002712010-02-23 13:46:05 +00001808 break;
1809 case Uless_equal:
1810 sltu(scratch, r2, rs);
1811 addiu(scratch, scratch, -1);
Steve Block44f0eee2011-05-26 01:26:41 +01001812 bltzal(scratch, offset);
Andrei Popescu31002712010-02-23 13:46:05 +00001813 break;
1814
1815 default:
1816 UNREACHABLE();
1817 }
Steve Block44f0eee2011-05-26 01:26:41 +01001818 // Emit a nop in the branch delay slot if required.
1819 if (bdslot == PROTECT)
1820 nop();
1821}
1822
1823
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001824void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
Steve Block44f0eee2011-05-26 01:26:41 +01001825 bal(shifted_branch_offset(L, false));
1826
1827 // Emit a nop in the branch delay slot if required.
1828 if (bdslot == PROTECT)
1829 nop();
1830}
1831
1832
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001833void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
1834 const Operand& rt,
1835 BranchDelaySlot bdslot) {
Steve Block44f0eee2011-05-26 01:26:41 +01001836 BRANCH_ARGS_CHECK(cond, rs, rt);
1837
1838 int32_t offset;
1839 Register r2 = no_reg;
1840 Register scratch = at;
1841 if (rt.is_reg()) {
1842 r2 = rt.rm_;
1843 } else if (cond != cc_always) {
1844 r2 = scratch;
1845 li(r2, rt);
1846 }
1847
1848 switch (cond) {
1849 case cc_always:
1850 offset = shifted_branch_offset(L, false);
1851 bal(offset);
1852 break;
1853 case eq:
1854 bne(rs, r2, 2);
1855 nop();
1856 offset = shifted_branch_offset(L, false);
1857 bal(offset);
1858 break;
1859 case ne:
1860 beq(rs, r2, 2);
1861 nop();
1862 offset = shifted_branch_offset(L, false);
1863 bal(offset);
1864 break;
1865
Ben Murdoch257744e2011-11-30 15:57:28 +00001866 // Signed comparison.
Steve Block44f0eee2011-05-26 01:26:41 +01001867 case greater:
1868 slt(scratch, r2, rs);
1869 addiu(scratch, scratch, -1);
1870 offset = shifted_branch_offset(L, false);
1871 bgezal(scratch, offset);
1872 break;
1873 case greater_equal:
1874 slt(scratch, rs, r2);
1875 addiu(scratch, scratch, -1);
1876 offset = shifted_branch_offset(L, false);
1877 bltzal(scratch, offset);
1878 break;
1879 case less:
1880 slt(scratch, rs, r2);
1881 addiu(scratch, scratch, -1);
1882 offset = shifted_branch_offset(L, false);
1883 bgezal(scratch, offset);
1884 break;
1885 case less_equal:
1886 slt(scratch, r2, rs);
1887 addiu(scratch, scratch, -1);
1888 offset = shifted_branch_offset(L, false);
1889 bltzal(scratch, offset);
1890 break;
1891
1892 // Unsigned comparison.
1893 case Ugreater:
1894 sltu(scratch, r2, rs);
1895 addiu(scratch, scratch, -1);
1896 offset = shifted_branch_offset(L, false);
1897 bgezal(scratch, offset);
1898 break;
1899 case Ugreater_equal:
1900 sltu(scratch, rs, r2);
1901 addiu(scratch, scratch, -1);
1902 offset = shifted_branch_offset(L, false);
1903 bltzal(scratch, offset);
1904 break;
1905 case Uless:
1906 sltu(scratch, rs, r2);
1907 addiu(scratch, scratch, -1);
1908 offset = shifted_branch_offset(L, false);
1909 bgezal(scratch, offset);
1910 break;
1911 case Uless_equal:
1912 sltu(scratch, r2, rs);
1913 addiu(scratch, scratch, -1);
1914 offset = shifted_branch_offset(L, false);
1915 bltzal(scratch, offset);
1916 break;
1917
1918 default:
1919 UNREACHABLE();
1920 }
1921
1922  // Check that the offset actually fits in an int16_t.
1923 ASSERT(is_int16(offset));
1924
1925 // Emit a nop in the branch delay slot if required.
1926 if (bdslot == PROTECT)
1927 nop();
1928}
1929
1930
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001931void MacroAssembler::Jump(Register target,
Steve Block44f0eee2011-05-26 01:26:41 +01001932 Condition cond,
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001933 Register rs,
1934 const Operand& rt,
1935 BranchDelaySlot bd) {
1936 BlockTrampolinePoolScope block_trampoline_pool(this);
1937 if (cond == cc_always) {
1938 jr(target);
1939 } else {
1940 BRANCH_ARGS_CHECK(cond, rs, rt);
1941 Branch(2, NegateCondition(cond), rs, rt);
1942 jr(target);
1943 }
1944 // Emit a nop in the branch delay slot if required.
1945 if (bd == PROTECT)
1946 nop();
1947}
1948
1949
1950void MacroAssembler::Jump(intptr_t target,
1951 RelocInfo::Mode rmode,
1952 Condition cond,
1953 Register rs,
1954 const Operand& rt,
1955 BranchDelaySlot bd) {
1956 li(t9, Operand(target, rmode));
1957 Jump(t9, cond, rs, rt, bd);
1958}
1959
1960
1961void MacroAssembler::Jump(Address target,
1962 RelocInfo::Mode rmode,
1963 Condition cond,
1964 Register rs,
1965 const Operand& rt,
1966 BranchDelaySlot bd) {
1967 ASSERT(!RelocInfo::IsCodeTarget(rmode));
1968 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
1969}
1970
1971
1972void MacroAssembler::Jump(Handle<Code> code,
1973 RelocInfo::Mode rmode,
1974 Condition cond,
1975 Register rs,
1976 const Operand& rt,
1977 BranchDelaySlot bd) {
1978 ASSERT(RelocInfo::IsCodeTarget(rmode));
1979 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
1980}
1981
1982
1983int MacroAssembler::CallSize(Register target,
1984 Condition cond,
1985 Register rs,
1986 const Operand& rt,
1987 BranchDelaySlot bd) {
1988 int size = 0;
1989
1990 if (cond == cc_always) {
1991 size += 1;
1992 } else {
1993 size += 3;
Steve Block44f0eee2011-05-26 01:26:41 +01001994 }
1995
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001996 if (bd == PROTECT)
1997 size += 1;
Steve Block44f0eee2011-05-26 01:26:41 +01001998
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001999 return size * kInstrSize;
2000}
Steve Block44f0eee2011-05-26 01:26:41 +01002001
Steve Block44f0eee2011-05-26 01:26:41 +01002002
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002003// Note: To call gcc-compiled C code on mips, you must call through t9.
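// (gcc's position-independent prologue typically recomputes the callee's gp
// from the entry address it expects to find in t9/$25, so calling through any
// other register can leave gp wrong inside the callee.)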
2004void MacroAssembler::Call(Register target,
2005 Condition cond,
2006 Register rs,
2007 const Operand& rt,
2008 BranchDelaySlot bd) {
2009 BlockTrampolinePoolScope block_trampoline_pool(this);
2010 Label start;
2011 bind(&start);
2012 if (cond == cc_always) {
2013 jalr(target);
2014 } else {
2015 BRANCH_ARGS_CHECK(cond, rs, rt);
2016 Branch(2, NegateCondition(cond), rs, rt);
2017 jalr(target);
Steve Block44f0eee2011-05-26 01:26:41 +01002018 }
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002019 // Emit a nop in the branch delay slot if required.
2020 if (bd == PROTECT)
2021 nop();
2022
2023 ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
2024 SizeOfCodeGeneratedSince(&start));
2025}
2026
2027
2028int MacroAssembler::CallSize(Address target,
2029 RelocInfo::Mode rmode,
2030 Condition cond,
2031 Register rs,
2032 const Operand& rt,
2033 BranchDelaySlot bd) {
2034 int size = CallSize(t9, cond, rs, rt, bd);
2035 return size + 2 * kInstrSize;
2036}
2037
2038
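// The extra 2 * kInstrSize accounted for in CallSize(Address...) above covers
// the lui/ori pair that the li(t9, ..., true) below emits for the call target.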
2039void MacroAssembler::Call(Address target,
2040 RelocInfo::Mode rmode,
2041 Condition cond,
2042 Register rs,
2043 const Operand& rt,
2044 BranchDelaySlot bd) {
2045 BlockTrampolinePoolScope block_trampoline_pool(this);
2046 Label start;
2047 bind(&start);
2048 int32_t target_int = reinterpret_cast<int32_t>(target);
2049 // Must record previous source positions before the
2050 // li() generates a new code target.
2051 positions_recorder()->WriteRecordedPositions();
2052 li(t9, Operand(target_int, rmode), true);
2053 Call(t9, cond, rs, rt, bd);
2054 ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
2055 SizeOfCodeGeneratedSince(&start));
2056}
2057
2058
2059int MacroAssembler::CallSize(Handle<Code> code,
2060 RelocInfo::Mode rmode,
2061 unsigned ast_id,
2062 Condition cond,
2063 Register rs,
2064 const Operand& rt,
2065 BranchDelaySlot bd) {
2066 return CallSize(reinterpret_cast<Address>(code.location()),
2067 rmode, cond, rs, rt, bd);
2068}
2069
2070
2071void MacroAssembler::Call(Handle<Code> code,
2072 RelocInfo::Mode rmode,
2073 unsigned ast_id,
2074 Condition cond,
2075 Register rs,
2076 const Operand& rt,
2077 BranchDelaySlot bd) {
2078 BlockTrampolinePoolScope block_trampoline_pool(this);
2079 Label start;
2080 bind(&start);
2081 ASSERT(RelocInfo::IsCodeTarget(rmode));
2082 if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
2083 SetRecordedAstId(ast_id);
2084 rmode = RelocInfo::CODE_TARGET_WITH_ID;
2085 }
2086 Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
2087 ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt),
2088 SizeOfCodeGeneratedSince(&start));
2089}
2090
2091
2092void MacroAssembler::Ret(Condition cond,
2093 Register rs,
2094 const Operand& rt,
2095 BranchDelaySlot bd) {
2096 Jump(ra, cond, rs, rt, bd);
2097}
2098
2099
2100void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
2101 BlockTrampolinePoolScope block_trampoline_pool(this);
2102
2103 uint32_t imm28;
2104 imm28 = jump_address(L);
2105 imm28 &= kImm28Mask;
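  // A J-type jump encodes only the low 28 bits of the target (a 26-bit index
  // shifted left by 2); the upper 4 bits come from the address of the
  // instruction in the delay slot, so the jump must stay in the same 256 MB
  // region.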
2106 { BlockGrowBufferScope block_buf_growth(this);
2107 // Buffer growth (and relocation) must be blocked for internal references
2108 // until associated instructions are emitted and available to be patched.
2109 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2110 j(imm28);
2111 }
2112 // Emit a nop in the branch delay slot if required.
2113 if (bdslot == PROTECT)
2114 nop();
2115}
2116
2117
2118void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
2119 BlockTrampolinePoolScope block_trampoline_pool(this);
2120
2121 uint32_t imm32;
2122 imm32 = jump_address(L);
2123 { BlockGrowBufferScope block_buf_growth(this);
2124 // Buffer growth (and relocation) must be blocked for internal references
2125 // until associated instructions are emitted and available to be patched.
2126 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2127 lui(at, (imm32 & kHiMask) >> kLuiShift);
2128 ori(at, at, (imm32 & kImm16Mask));
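    // at now holds the full 32-bit address of L as a patchable internal
    // reference.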
2129 }
2130 jr(at);
2131
2132 // Emit a nop in the branch delay slot if required.
2133 if (bdslot == PROTECT)
2134 nop();
2135}
2136
2137
2138void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
2139 BlockTrampolinePoolScope block_trampoline_pool(this);
2140
2141 uint32_t imm32;
2142 imm32 = jump_address(L);
2143 { BlockGrowBufferScope block_buf_growth(this);
2144 // Buffer growth (and relocation) must be blocked for internal references
2145 // until associated instructions are emitted and available to be patched.
2146 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2147 lui(at, (imm32 & kHiMask) >> kLuiShift);
2148 ori(at, at, (imm32 & kImm16Mask));
2149 }
2150 jalr(at);
2151
2152 // Emit a nop in the branch delay slot if required.
2153 if (bdslot == PROTECT)
2154 nop();
Steve Block44f0eee2011-05-26 01:26:41 +01002155}
2156
2157
2158void MacroAssembler::DropAndRet(int drop,
2159 Condition cond,
2160 Register r1,
2161 const Operand& r2) {
2162 // This is a workaround to make sure only one branch instruction is
2163 // generated. It relies on Drop and Ret not creating branches if
2164 // cond == cc_always.
2165 Label skip;
2166 if (cond != cc_always) {
2167 Branch(&skip, NegateCondition(cond), r1, r2);
2168 }
2169
2170 Drop(drop);
2171 Ret();
2172
2173 if (cond != cc_always) {
2174 bind(&skip);
2175 }
2176}
2177
2178
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002179void MacroAssembler::Drop(int count,
2180 Condition cond,
2181 Register reg,
2182 const Operand& op) {
2183 if (count <= 0) {
2184 return;
2185 }
2186
2187 Label skip;
2188
2189 if (cond != al) {
2190 Branch(&skip, NegateCondition(cond), reg, op);
2191 }
2192
2193 addiu(sp, sp, count * kPointerSize);
2194
2195 if (cond != al) {
2196 bind(&skip);
2197 }
2198}
2199
2200
2201
Steve Block44f0eee2011-05-26 01:26:41 +01002202void MacroAssembler::Swap(Register reg1,
2203 Register reg2,
2204 Register scratch) {
2205 if (scratch.is(no_reg)) {
2206 Xor(reg1, reg1, Operand(reg2));
2207 Xor(reg2, reg2, Operand(reg1));
2208 Xor(reg1, reg1, Operand(reg2));
2209 } else {
2210 mov(scratch, reg1);
2211 mov(reg1, reg2);
2212 mov(reg2, scratch);
2213 }
Andrei Popescu31002712010-02-23 13:46:05 +00002214}
2215
2216
2217void MacroAssembler::Call(Label* target) {
Steve Block44f0eee2011-05-26 01:26:41 +01002218 BranchAndLink(target);
2219}
2220
2221
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002222void MacroAssembler::Push(Handle<Object> handle) {
2223 li(at, Operand(handle));
2224 push(at);
2225}
2226
2227
Steve Block6ded16b2010-05-10 14:33:55 +01002228#ifdef ENABLE_DEBUGGER_SUPPORT
Steve Block6ded16b2010-05-10 14:33:55 +01002229
Steve Block44f0eee2011-05-26 01:26:41 +01002230void MacroAssembler::DebugBreak() {
2231 ASSERT(allow_stub_calls());
2232 mov(a0, zero_reg);
2233 li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
2234 CEntryStub ces(1);
2235 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
2236}
2237
2238#endif // ENABLE_DEBUGGER_SUPPORT
Steve Block6ded16b2010-05-10 14:33:55 +01002239
2240
Andrei Popescu31002712010-02-23 13:46:05 +00002241// ---------------------------------------------------------------------------
Ben Murdoch257744e2011-11-30 15:57:28 +00002242// Exception handling.
Andrei Popescu31002712010-02-23 13:46:05 +00002243
2244void MacroAssembler::PushTryHandler(CodeLocation try_location,
2245 HandlerType type) {
Steve Block6ded16b2010-05-10 14:33:55 +01002246 // Adjust this code if not the case.
2247 ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
2248 // The return address is passed in register ra.
2249 if (try_location == IN_JAVASCRIPT) {
2250 if (type == TRY_CATCH_HANDLER) {
2251 li(t0, Operand(StackHandler::TRY_CATCH));
2252 } else {
2253 li(t0, Operand(StackHandler::TRY_FINALLY));
2254 }
2255 ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
2256 && StackHandlerConstants::kFPOffset == 2 * kPointerSize
2257 && StackHandlerConstants::kPCOffset == 3 * kPointerSize
2258 && StackHandlerConstants::kNextOffset == 0 * kPointerSize);
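    // The stores below fill the new handler frame, highest slot first:
    //   sp + 12: ra    (kPCOffset)
    //   sp +  8: fp    (kFPOffset)
    //   sp +  4: state (kStateOffset)
    //   sp +  0: next  (kNextOffset)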
2259 // Save the current handler as the next handler.
Steve Block44f0eee2011-05-26 01:26:41 +01002260 li(t2, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
Steve Block6ded16b2010-05-10 14:33:55 +01002261 lw(t1, MemOperand(t2));
2262
2263 addiu(sp, sp, -StackHandlerConstants::kSize);
2264 sw(ra, MemOperand(sp, 12));
2265 sw(fp, MemOperand(sp, 8));
2266 sw(t0, MemOperand(sp, 4));
2267 sw(t1, MemOperand(sp, 0));
2268
2269 // Link this handler as the new current one.
2270 sw(sp, MemOperand(t2));
2271
2272 } else {
2273 // Must preserve a0-a3, and s0 (argv).
2274 ASSERT(try_location == IN_JS_ENTRY);
2275 ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
2276 && StackHandlerConstants::kFPOffset == 2 * kPointerSize
2277 && StackHandlerConstants::kPCOffset == 3 * kPointerSize
2278 && StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2279
2280 // The frame pointer does not point to a JS frame so we save NULL
2281 // for fp. We expect the code throwing an exception to check fp
2282 // before dereferencing it to restore the context.
2283 li(t0, Operand(StackHandler::ENTRY));
2284
2285 // Save the current handler as the next handler.
Steve Block44f0eee2011-05-26 01:26:41 +01002286 li(t2, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
Steve Block6ded16b2010-05-10 14:33:55 +01002287 lw(t1, MemOperand(t2));
2288
2289 addiu(sp, sp, -StackHandlerConstants::kSize);
2290 sw(ra, MemOperand(sp, 12));
2291 sw(zero_reg, MemOperand(sp, 8));
2292 sw(t0, MemOperand(sp, 4));
2293 sw(t1, MemOperand(sp, 0));
2294
2295 // Link this handler as the new current one.
2296 sw(sp, MemOperand(t2));
2297 }
Andrei Popescu31002712010-02-23 13:46:05 +00002298}
2299
2300
2301void MacroAssembler::PopTryHandler() {
Steve Block44f0eee2011-05-26 01:26:41 +01002302 ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
2303 pop(a1);
2304 Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
2305 li(at, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
2306 sw(a1, MemOperand(at));
Andrei Popescu31002712010-02-23 13:46:05 +00002307}
2308
2309
Ben Murdoch257744e2011-11-30 15:57:28 +00002310void MacroAssembler::Throw(Register value) {
2311 // v0 is expected to hold the exception.
2312 Move(v0, value);
2313
2314 // Adjust this code if not the case.
2315 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
2316
2317 // Drop the sp to the top of the handler.
2318 li(a3, Operand(ExternalReference(Isolate::k_handler_address,
2319 isolate())));
2320 lw(sp, MemOperand(a3));
2321
2322 // Restore the next handler and frame pointer, discard handler state.
2323 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2324 pop(a2);
2325 sw(a2, MemOperand(a3));
2326 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
2327 MultiPop(a3.bit() | fp.bit());
2328
2329 // Before returning we restore the context from the frame pointer if
2330 // not NULL. The frame pointer is NULL in the exception handler of a
2331 // JS entry frame.
2332 // Set cp to NULL if fp is NULL.
2333 Label done;
2334 Branch(USE_DELAY_SLOT, &done, eq, fp, Operand(zero_reg));
2335 mov(cp, zero_reg); // In branch delay slot.
2336 lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2337 bind(&done);
2338
2339#ifdef DEBUG
2340 // When emitting debug_code, set ra as return address for the jump.
2341 // 5 instructions: add: 1, pop: 2, jump: 2.
2342 const int kOffsetRaInstructions = 5;
2343 Label find_ra;
2344
2345 if (emit_debug_code()) {
2346 // Compute ra for the Jump(t9).
2347 const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;
2348
2349 // This branch-and-link sequence is needed to get the current PC on mips,
2350    // saved in the ra register, then adjusted by the instruction count.
2351    bal(&find_ra);  // bal exposes the branch delay slot.
2352 nop(); // Branch delay slot nop.
2353 bind(&find_ra);
2354 addiu(ra, ra, kOffsetRaBytes);
2355 }
2356#endif
2357
2358 STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
2359 pop(t9); // 2 instructions: lw, add sp.
2360 Jump(t9); // 2 instructions: jr, nop (in delay slot).
2361
2362 if (emit_debug_code()) {
2363 // Make sure that the expected number of instructions were generated.
2364 ASSERT_EQ(kOffsetRaInstructions,
2365 InstructionsGeneratedSince(&find_ra));
2366 }
2367}
2368
2369
2370void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
2371 Register value) {
2372 // Adjust this code if not the case.
2373 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
2374
2375 // v0 is expected to hold the exception.
2376 Move(v0, value);
2377
2378 // Drop sp to the top stack handler.
2379 li(a3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
2380 lw(sp, MemOperand(a3));
2381
2382 // Unwind the handlers until the ENTRY handler is found.
2383 Label loop, done;
2384 bind(&loop);
2385 // Load the type of the current stack handler.
2386 const int kStateOffset = StackHandlerConstants::kStateOffset;
2387 lw(a2, MemOperand(sp, kStateOffset));
2388 Branch(&done, eq, a2, Operand(StackHandler::ENTRY));
2389 // Fetch the next handler in the list.
2390 const int kNextOffset = StackHandlerConstants::kNextOffset;
2391 lw(sp, MemOperand(sp, kNextOffset));
2392 jmp(&loop);
2393 bind(&done);
2394
2395 // Set the top handler address to next handler past the current ENTRY handler.
2396 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2397 pop(a2);
2398 sw(a2, MemOperand(a3));
2399
2400 if (type == OUT_OF_MEMORY) {
2401 // Set external caught exception to false.
2402 ExternalReference external_caught(
2403 Isolate::k_external_caught_exception_address, isolate());
2404 li(a0, Operand(false, RelocInfo::NONE));
2405 li(a2, Operand(external_caught));
2406 sw(a0, MemOperand(a2));
2407
2408 // Set pending exception and v0 to out of memory exception.
2409 Failure* out_of_memory = Failure::OutOfMemoryException();
2410 li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
2411 li(a2, Operand(ExternalReference(Isolate::k_pending_exception_address,
2412 isolate())));
2413 sw(v0, MemOperand(a2));
2414 }
2415
2416 // Stack layout at this point. See also StackHandlerConstants.
2417 // sp -> state (ENTRY)
2418 // fp
2419 // ra
2420
2421 // Discard handler state (a2 is not used) and restore frame pointer.
2422 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
2423 MultiPop(a2.bit() | fp.bit()); // a2: discarded state.
2424 // Before returning we restore the context from the frame pointer if
2425 // not NULL. The frame pointer is NULL in the exception handler of a
2426 // JS entry frame.
2427 Label cp_null;
2428 Branch(USE_DELAY_SLOT, &cp_null, eq, fp, Operand(zero_reg));
2429 mov(cp, zero_reg); // In the branch delay slot.
2430 lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2431 bind(&cp_null);
2432
2433#ifdef DEBUG
2434 // When emitting debug_code, set ra as return address for the jump.
2435 // 5 instructions: add: 1, pop: 2, jump: 2.
2436 const int kOffsetRaInstructions = 5;
2437 Label find_ra;
2438
2439 if (emit_debug_code()) {
2440 // Compute ra for the Jump(t9).
2441 const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;
2442
2443 // This branch-and-link sequence is needed to get the current PC on mips,
2444    // saved in the ra register, then adjusted by the instruction count.
2445 bal(&find_ra); // bal exposes branch-delay slot.
2446 nop(); // Branch delay slot nop.
2447 bind(&find_ra);
2448 addiu(ra, ra, kOffsetRaBytes);
2449 }
2450#endif
2451 STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
2452 pop(t9); // 2 instructions: lw, add sp.
2453 Jump(t9); // 2 instructions: jr, nop (in delay slot).
2454
2455 if (emit_debug_code()) {
2456 // Make sure that the expected number of instructions were generated.
2457 ASSERT_EQ(kOffsetRaInstructions,
2458 InstructionsGeneratedSince(&find_ra));
2459 }
2460}
2461
2462
Steve Block44f0eee2011-05-26 01:26:41 +01002463void MacroAssembler::AllocateInNewSpace(int object_size,
2464 Register result,
2465 Register scratch1,
2466 Register scratch2,
2467 Label* gc_required,
2468 AllocationFlags flags) {
2469 if (!FLAG_inline_new) {
Ben Murdoch257744e2011-11-30 15:57:28 +00002470 if (emit_debug_code()) {
Steve Block44f0eee2011-05-26 01:26:41 +01002471 // Trash the registers to simulate an allocation failure.
2472 li(result, 0x7091);
2473 li(scratch1, 0x7191);
2474 li(scratch2, 0x7291);
2475 }
2476 jmp(gc_required);
2477 return;
Steve Block6ded16b2010-05-10 14:33:55 +01002478 }
2479
Steve Block44f0eee2011-05-26 01:26:41 +01002480 ASSERT(!result.is(scratch1));
2481 ASSERT(!result.is(scratch2));
2482 ASSERT(!scratch1.is(scratch2));
2483 ASSERT(!scratch1.is(t9));
2484 ASSERT(!scratch2.is(t9));
2485 ASSERT(!result.is(t9));
Steve Block6ded16b2010-05-10 14:33:55 +01002486
Steve Block44f0eee2011-05-26 01:26:41 +01002487 // Make object size into bytes.
2488 if ((flags & SIZE_IN_WORDS) != 0) {
2489 object_size *= kPointerSize;
2490 }
2491 ASSERT_EQ(0, object_size & kObjectAlignmentMask);
Steve Block6ded16b2010-05-10 14:33:55 +01002492
Steve Block44f0eee2011-05-26 01:26:41 +01002493 // Check relative positions of allocation top and limit addresses.
2494  // ARM adds additional checks to make sure the ldm instruction can be
2495  // used. On MIPS we don't have ldm, so no additional checks are needed.
2496 ExternalReference new_space_allocation_top =
2497 ExternalReference::new_space_allocation_top_address(isolate());
2498 ExternalReference new_space_allocation_limit =
2499 ExternalReference::new_space_allocation_limit_address(isolate());
2500 intptr_t top =
2501 reinterpret_cast<intptr_t>(new_space_allocation_top.address());
2502 intptr_t limit =
2503 reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
2504 ASSERT((limit - top) == kPointerSize);
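  // Because the limit word sits directly after the top word, a single base
  // register (topaddr below) can address both: top at offset 0 and limit at
  // offset kPointerSize.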
2505
2506 // Set up allocation top address and object size registers.
2507 Register topaddr = scratch1;
2508 Register obj_size_reg = scratch2;
2509 li(topaddr, Operand(new_space_allocation_top));
2510 li(obj_size_reg, Operand(object_size));
2511
2512 // This code stores a temporary value in t9.
2513 if ((flags & RESULT_CONTAINS_TOP) == 0) {
2514 // Load allocation top into result and allocation limit into t9.
2515 lw(result, MemOperand(topaddr));
2516 lw(t9, MemOperand(topaddr, kPointerSize));
2517 } else {
Ben Murdoch257744e2011-11-30 15:57:28 +00002518 if (emit_debug_code()) {
Steve Block44f0eee2011-05-26 01:26:41 +01002519 // Assert that result actually contains top on entry. t9 is used
2520 // immediately below so this use of t9 does not cause difference with
2521 // respect to register content between debug and release mode.
2522 lw(t9, MemOperand(topaddr));
2523 Check(eq, "Unexpected allocation top", result, Operand(t9));
2524 }
2525 // Load allocation limit into t9. Result already contains allocation top.
2526 lw(t9, MemOperand(topaddr, limit - top));
2527 }
2528
2529 // Calculate new top and bail out if new space is exhausted. Use result
2530 // to calculate the new top.
2531 Addu(scratch2, result, Operand(obj_size_reg));
2532 Branch(gc_required, Ugreater, scratch2, Operand(t9));
2533 sw(scratch2, MemOperand(topaddr));
2534
2535 // Tag object if requested.
2536 if ((flags & TAG_OBJECT) != 0) {
2537 Addu(result, result, Operand(kHeapObjectTag));
2538 }
Steve Block6ded16b2010-05-10 14:33:55 +01002539}
2540
2541
Steve Block44f0eee2011-05-26 01:26:41 +01002542void MacroAssembler::AllocateInNewSpace(Register object_size,
2543 Register result,
2544 Register scratch1,
2545 Register scratch2,
2546 Label* gc_required,
2547 AllocationFlags flags) {
2548 if (!FLAG_inline_new) {
Ben Murdoch257744e2011-11-30 15:57:28 +00002549 if (emit_debug_code()) {
Steve Block44f0eee2011-05-26 01:26:41 +01002550 // Trash the registers to simulate an allocation failure.
2551 li(result, 0x7091);
2552 li(scratch1, 0x7191);
2553 li(scratch2, 0x7291);
2554 }
2555 jmp(gc_required);
2556 return;
2557 }
2558
2559 ASSERT(!result.is(scratch1));
2560 ASSERT(!result.is(scratch2));
2561 ASSERT(!scratch1.is(scratch2));
2562 ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
2563
2564 // Check relative positions of allocation top and limit addresses.
2565  // ARM adds additional checks to make sure the ldm instruction can be
2566  // used. On MIPS we don't have ldm, so no additional checks are needed.
2567 ExternalReference new_space_allocation_top =
2568 ExternalReference::new_space_allocation_top_address(isolate());
2569 ExternalReference new_space_allocation_limit =
2570 ExternalReference::new_space_allocation_limit_address(isolate());
2571 intptr_t top =
2572 reinterpret_cast<intptr_t>(new_space_allocation_top.address());
2573 intptr_t limit =
2574 reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
2575 ASSERT((limit - top) == kPointerSize);
2576
2577 // Set up allocation top address and object size registers.
2578 Register topaddr = scratch1;
2579 li(topaddr, Operand(new_space_allocation_top));
2580
2581 // This code stores a temporary value in t9.
2582 if ((flags & RESULT_CONTAINS_TOP) == 0) {
2583 // Load allocation top into result and allocation limit into t9.
2584 lw(result, MemOperand(topaddr));
2585 lw(t9, MemOperand(topaddr, kPointerSize));
2586 } else {
Ben Murdoch257744e2011-11-30 15:57:28 +00002587 if (emit_debug_code()) {
Steve Block44f0eee2011-05-26 01:26:41 +01002588 // Assert that result actually contains top on entry. t9 is used
2589 // immediately below so this use of t9 does not cause difference with
2590 // respect to register content between debug and release mode.
2591 lw(t9, MemOperand(topaddr));
2592 Check(eq, "Unexpected allocation top", result, Operand(t9));
2593 }
2594 // Load allocation limit into t9. Result already contains allocation top.
2595 lw(t9, MemOperand(topaddr, limit - top));
2596 }
2597
2598 // Calculate new top and bail out if new space is exhausted. Use result
2599 // to calculate the new top. Object size may be in words so a shift is
2600 // required to get the number of bytes.
2601 if ((flags & SIZE_IN_WORDS) != 0) {
2602 sll(scratch2, object_size, kPointerSizeLog2);
2603 Addu(scratch2, result, scratch2);
2604 } else {
2605 Addu(scratch2, result, Operand(object_size));
2606 }
2607 Branch(gc_required, Ugreater, scratch2, Operand(t9));
2608
2609  // Update allocation top. scratch2 temporarily holds the new top.
Ben Murdoch257744e2011-11-30 15:57:28 +00002610 if (emit_debug_code()) {
Steve Block44f0eee2011-05-26 01:26:41 +01002611 And(t9, scratch2, Operand(kObjectAlignmentMask));
2612 Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
2613 }
2614 sw(scratch2, MemOperand(topaddr));
2615
2616 // Tag object if requested.
2617 if ((flags & TAG_OBJECT) != 0) {
2618 Addu(result, result, Operand(kHeapObjectTag));
2619 }
2620}
2621
2622
2623void MacroAssembler::UndoAllocationInNewSpace(Register object,
2624 Register scratch) {
2625 ExternalReference new_space_allocation_top =
2626 ExternalReference::new_space_allocation_top_address(isolate());
2627
2628 // Make sure the object has no tag before resetting top.
2629 And(object, object, Operand(~kHeapObjectTagMask));
2630#ifdef DEBUG
2631 // Check that the object un-allocated is below the current top.
2632 li(scratch, Operand(new_space_allocation_top));
2633 lw(scratch, MemOperand(scratch));
2634 Check(less, "Undo allocation of non allocated memory",
2635 object, Operand(scratch));
2636#endif
2637 // Write the address of the object to un-allocate as the current top.
2638 li(scratch, Operand(new_space_allocation_top));
2639 sw(object, MemOperand(scratch));
2640}
2641
2642
2643void MacroAssembler::AllocateTwoByteString(Register result,
2644 Register length,
2645 Register scratch1,
2646 Register scratch2,
2647 Register scratch3,
2648 Label* gc_required) {
2649  // Calculate the number of bytes needed for the characters in the string
2650  // while observing object alignment.
2651 ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
2652 sll(scratch1, length, 1); // Length in bytes, not chars.
2653 addiu(scratch1, scratch1,
2654 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
2655 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
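  // scratch1 is now (2 * length + SeqTwoByteString::kHeaderSize +
  // kObjectAlignmentMask) & ~kObjectAlignmentMask, i.e. the header plus
  // payload rounded up to the object alignment.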
2656
2657 // Allocate two-byte string in new space.
2658 AllocateInNewSpace(scratch1,
2659 result,
2660 scratch2,
2661 scratch3,
2662 gc_required,
2663 TAG_OBJECT);
2664
2665 // Set the map, length and hash field.
2666 InitializeNewString(result,
2667 length,
2668 Heap::kStringMapRootIndex,
2669 scratch1,
2670 scratch2);
2671}
2672
2673
2674void MacroAssembler::AllocateAsciiString(Register result,
2675 Register length,
2676 Register scratch1,
2677 Register scratch2,
2678 Register scratch3,
2679 Label* gc_required) {
2680 // Calculate the number of bytes needed for the characters in the string
2681 // while observing object alignment.
2682 ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
2683 ASSERT(kCharSize == 1);
2684 addiu(scratch1, length, kObjectAlignmentMask + SeqAsciiString::kHeaderSize);
2685 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
2686
2687 // Allocate ASCII string in new space.
2688 AllocateInNewSpace(scratch1,
2689 result,
2690 scratch2,
2691 scratch3,
2692 gc_required,
2693 TAG_OBJECT);
2694
2695 // Set the map, length and hash field.
2696 InitializeNewString(result,
2697 length,
2698 Heap::kAsciiStringMapRootIndex,
2699 scratch1,
2700 scratch2);
2701}
2702
2703
2704void MacroAssembler::AllocateTwoByteConsString(Register result,
2705 Register length,
2706 Register scratch1,
2707 Register scratch2,
2708 Label* gc_required) {
2709 AllocateInNewSpace(ConsString::kSize,
2710 result,
2711 scratch1,
2712 scratch2,
2713 gc_required,
2714 TAG_OBJECT);
2715 InitializeNewString(result,
2716 length,
2717 Heap::kConsStringMapRootIndex,
2718 scratch1,
2719 scratch2);
2720}
2721
2722
2723void MacroAssembler::AllocateAsciiConsString(Register result,
2724 Register length,
2725 Register scratch1,
2726 Register scratch2,
2727 Label* gc_required) {
2728 AllocateInNewSpace(ConsString::kSize,
2729 result,
2730 scratch1,
2731 scratch2,
2732 gc_required,
2733 TAG_OBJECT);
2734 InitializeNewString(result,
2735 length,
2736 Heap::kConsAsciiStringMapRootIndex,
2737 scratch1,
2738 scratch2);
2739}
2740
2741
2742// Allocates a heap number or jumps to the label if the young space is full and
2743// a scavenge is needed.
2744void MacroAssembler::AllocateHeapNumber(Register result,
2745 Register scratch1,
2746 Register scratch2,
2747 Register heap_number_map,
2748 Label* need_gc) {
2749 // Allocate an object in the heap for the heap number and tag it as a heap
2750 // object.
2751 AllocateInNewSpace(HeapNumber::kSize,
2752 result,
2753 scratch1,
2754 scratch2,
2755 need_gc,
2756 TAG_OBJECT);
2757
2758 // Store heap number map in the allocated object.
2759 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2760 sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
2761}
2762
2763
2764void MacroAssembler::AllocateHeapNumberWithValue(Register result,
2765 FPURegister value,
2766 Register scratch1,
2767 Register scratch2,
2768 Label* gc_required) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002769 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
2770 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
Steve Block44f0eee2011-05-26 01:26:41 +01002771 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
2772}
2773
2774
2775// Copies a fixed number of fields of heap objects from src to dst.
2776void MacroAssembler::CopyFields(Register dst,
2777 Register src,
2778 RegList temps,
2779 int field_count) {
2780 ASSERT((temps & dst.bit()) == 0);
2781 ASSERT((temps & src.bit()) == 0);
2782 // Primitive implementation using only one temporary register.
2783
2784 Register tmp = no_reg;
2785 // Find a temp register in temps list.
2786 for (int i = 0; i < kNumRegisters; i++) {
2787 if ((temps & (1 << i)) != 0) {
2788 tmp.code_ = i;
2789 break;
2790 }
2791 }
2792 ASSERT(!tmp.is(no_reg));
2793
2794 for (int i = 0; i < field_count; i++) {
2795 lw(tmp, FieldMemOperand(src, i * kPointerSize));
2796 sw(tmp, FieldMemOperand(dst, i * kPointerSize));
2797 }
2798}
2799
2800
Ben Murdoch257744e2011-11-30 15:57:28 +00002801void MacroAssembler::CopyBytes(Register src,
2802 Register dst,
2803 Register length,
2804 Register scratch) {
2805 Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
2806
2807 // Align src before copying in word size chunks.
2808 bind(&align_loop);
2809 Branch(&done, eq, length, Operand(zero_reg));
2810 bind(&align_loop_1);
2811 And(scratch, src, kPointerSize - 1);
2812 Branch(&word_loop, eq, scratch, Operand(zero_reg));
2813 lbu(scratch, MemOperand(src));
2814 Addu(src, src, 1);
2815 sb(scratch, MemOperand(dst));
2816 Addu(dst, dst, 1);
2817 Subu(length, length, Operand(1));
2818 Branch(&byte_loop_1, ne, length, Operand(zero_reg));
2819
2820 // Copy bytes in word size chunks.
2821 bind(&word_loop);
2822 if (emit_debug_code()) {
2823 And(scratch, src, kPointerSize - 1);
2824 Assert(eq, "Expecting alignment for CopyBytes",
2825 scratch, Operand(zero_reg));
2826 }
2827 Branch(&byte_loop, lt, length, Operand(kPointerSize));
2828 lw(scratch, MemOperand(src));
2829 Addu(src, src, kPointerSize);
2830
2831 // TODO(kalmard) check if this can be optimized to use sw in most cases.
2832 // Can't use unaligned access - copy byte by byte.
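  // Storing the least-significant byte of the loaded word first relies on a
  // little-endian memory layout; on a big-endian MIPS target the bytes within
  // each word would come out reversed.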
2833 sb(scratch, MemOperand(dst, 0));
2834 srl(scratch, scratch, 8);
2835 sb(scratch, MemOperand(dst, 1));
2836 srl(scratch, scratch, 8);
2837 sb(scratch, MemOperand(dst, 2));
2838 srl(scratch, scratch, 8);
2839 sb(scratch, MemOperand(dst, 3));
2840 Addu(dst, dst, 4);
2841
2842 Subu(length, length, Operand(kPointerSize));
2843 Branch(&word_loop);
2844
2845 // Copy the last bytes if any left.
2846 bind(&byte_loop);
2847 Branch(&done, eq, length, Operand(zero_reg));
2848 bind(&byte_loop_1);
2849 lbu(scratch, MemOperand(src));
2850 Addu(src, src, 1);
2851 sb(scratch, MemOperand(dst));
2852 Addu(dst, dst, 1);
2853 Subu(length, length, Operand(1));
2854 Branch(&byte_loop_1, ne, length, Operand(zero_reg));
2855 bind(&done);
2856}
2857
2858
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002859void MacroAssembler::CheckFastElements(Register map,
2860 Register scratch,
2861 Label* fail) {
2862 STATIC_ASSERT(JSObject::FAST_ELEMENTS == 0);
2863 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2864 Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue));
2865}
2866
2867
Steve Block44f0eee2011-05-26 01:26:41 +01002868void MacroAssembler::CheckMap(Register obj,
2869 Register scratch,
2870 Handle<Map> map,
2871 Label* fail,
Ben Murdoch257744e2011-11-30 15:57:28 +00002872 SmiCheckType smi_check_type) {
2873 if (smi_check_type == DO_SMI_CHECK) {
Steve Block44f0eee2011-05-26 01:26:41 +01002874 JumpIfSmi(obj, fail);
2875 }
2876 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2877 li(at, Operand(map));
2878 Branch(fail, ne, scratch, Operand(at));
2879}
2880
2881
Ben Murdoch257744e2011-11-30 15:57:28 +00002882void MacroAssembler::DispatchMap(Register obj,
2883 Register scratch,
2884 Handle<Map> map,
2885 Handle<Code> success,
2886 SmiCheckType smi_check_type) {
2887 Label fail;
2888 if (smi_check_type == DO_SMI_CHECK) {
2889 JumpIfSmi(obj, &fail);
2890 }
2891 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2892 Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
2893 bind(&fail);
2894}
2895
2896
Steve Block44f0eee2011-05-26 01:26:41 +01002897void MacroAssembler::CheckMap(Register obj,
2898 Register scratch,
2899 Heap::RootListIndex index,
2900 Label* fail,
Ben Murdoch257744e2011-11-30 15:57:28 +00002901 SmiCheckType smi_check_type) {
2902 if (smi_check_type == DO_SMI_CHECK) {
Steve Block44f0eee2011-05-26 01:26:41 +01002903 JumpIfSmi(obj, fail);
2904 }
2905 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2906 LoadRoot(at, index);
2907 Branch(fail, ne, scratch, Operand(at));
Steve Block6ded16b2010-05-10 14:33:55 +01002908}
2909
2910
Ben Murdoch257744e2011-11-30 15:57:28 +00002911void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
2912 CpuFeatures::Scope scope(FPU);
2913 if (IsMipsSoftFloatABI) {
2914 Move(dst, v0, v1);
2915 } else {
2916 Move(dst, f0); // Reg f0 is o32 ABI FP return value.
2917 }
2918}
2919
2920
2921void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
2922 CpuFeatures::Scope scope(FPU);
2923 if (!IsMipsSoftFloatABI) {
2924 Move(f12, dreg);
2925 } else {
2926 Move(a0, a1, dreg);
2927 }
2928}
2929
2930
2931void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
2932 DoubleRegister dreg2) {
2933 CpuFeatures::Scope scope(FPU);
2934 if (!IsMipsSoftFloatABI) {
2935 if (dreg2.is(f12)) {
2936 ASSERT(!dreg1.is(f14));
2937 Move(f14, dreg2);
2938 Move(f12, dreg1);
2939 } else {
2940 Move(f12, dreg1);
2941 Move(f14, dreg2);
2942 }
2943 } else {
2944 Move(a0, a1, dreg1);
2945 Move(a2, a3, dreg2);
2946 }
2947}
2948
2949
2950void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
2951 Register reg) {
2952 CpuFeatures::Scope scope(FPU);
2953 if (!IsMipsSoftFloatABI) {
2954 Move(f12, dreg);
2955 Move(a2, reg);
2956 } else {
2957 Move(a2, reg);
2958 Move(a0, a1, dreg);
2959 }
2960}
2961
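// Taken together, the helpers above implement the o32 convention used for the
// C calls here: with hard-float the first two double arguments travel in f12
// and f14 (and f0 carries a double result); with the soft-float ABI doubles go
// in the a0/a1 and a2/a3 register pairs instead, and an integer argument that
// follows a single double argument is placed in a2.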
2962
2963void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
2964 // This macro takes the dst register to make the code more readable
2965 // at the call sites. However, the dst register has to be t1 to
2966 // follow the calling convention which requires the call type to be
2967 // in t1.
2968 ASSERT(dst.is(t1));
2969 if (call_kind == CALL_AS_FUNCTION) {
2970 li(dst, Operand(Smi::FromInt(1)));
2971 } else {
2972 li(dst, Operand(Smi::FromInt(0)));
2973 }
2974}
2975
2976
Steve Block6ded16b2010-05-10 14:33:55 +01002977// -----------------------------------------------------------------------------
Ben Murdoch257744e2011-11-30 15:57:28 +00002978// JavaScript invokes.
Steve Block6ded16b2010-05-10 14:33:55 +01002979
2980void MacroAssembler::InvokePrologue(const ParameterCount& expected,
2981 const ParameterCount& actual,
2982 Handle<Code> code_constant,
2983 Register code_reg,
2984 Label* done,
Steve Block44f0eee2011-05-26 01:26:41 +01002985 InvokeFlag flag,
Ben Murdoch257744e2011-11-30 15:57:28 +00002986 const CallWrapper& call_wrapper,
2987 CallKind call_kind) {
Steve Block6ded16b2010-05-10 14:33:55 +01002988 bool definitely_matches = false;
2989 Label regular_invoke;
2990
2991  // Check whether the expected and actual argument counts match. If not,
2992  // set up registers according to contract with ArgumentsAdaptorTrampoline:
2993 // a0: actual arguments count
2994 // a1: function (passed through to callee)
2995 // a2: expected arguments count
2996 // a3: callee code entry
2997
2998 // The code below is made a lot easier because the calling code already sets
2999 // up actual and expected registers according to the contract if values are
3000 // passed in registers.
3001 ASSERT(actual.is_immediate() || actual.reg().is(a0));
3002 ASSERT(expected.is_immediate() || expected.reg().is(a2));
3003 ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
3004
3005 if (expected.is_immediate()) {
3006 ASSERT(actual.is_immediate());
3007 if (expected.immediate() == actual.immediate()) {
3008 definitely_matches = true;
3009 } else {
3010 li(a0, Operand(actual.immediate()));
3011 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3012 if (expected.immediate() == sentinel) {
3013 // Don't worry about adapting arguments for builtins that
3014        // don't want that done. Skip adaptation code by making it look
3015 // like we have a match between expected and actual number of
3016 // arguments.
3017 definitely_matches = true;
3018 } else {
3019 li(a2, Operand(expected.immediate()));
3020 }
3021 }
Ben Murdoch257744e2011-11-30 15:57:28 +00003022 } else if (actual.is_immediate()) {
3023 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
3024 li(a0, Operand(actual.immediate()));
Steve Block6ded16b2010-05-10 14:33:55 +01003025 } else {
Ben Murdoch257744e2011-11-30 15:57:28 +00003026 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
Steve Block6ded16b2010-05-10 14:33:55 +01003027 }
3028
3029 if (!definitely_matches) {
3030 if (!code_constant.is_null()) {
3031 li(a3, Operand(code_constant));
3032 addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
3033 }
3034
Steve Block44f0eee2011-05-26 01:26:41 +01003035 Handle<Code> adaptor =
3036 isolate()->builtins()->ArgumentsAdaptorTrampoline();
Steve Block6ded16b2010-05-10 14:33:55 +01003037 if (flag == CALL_FUNCTION) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003038 call_wrapper.BeforeCall(CallSize(adaptor));
Ben Murdoch257744e2011-11-30 15:57:28 +00003039 SetCallKind(t1, call_kind);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003040 Call(adaptor);
Ben Murdoch257744e2011-11-30 15:57:28 +00003041 call_wrapper.AfterCall();
Steve Block44f0eee2011-05-26 01:26:41 +01003042 jmp(done);
Steve Block6ded16b2010-05-10 14:33:55 +01003043 } else {
Ben Murdoch257744e2011-11-30 15:57:28 +00003044 SetCallKind(t1, call_kind);
Steve Block44f0eee2011-05-26 01:26:41 +01003045 Jump(adaptor, RelocInfo::CODE_TARGET);
Steve Block6ded16b2010-05-10 14:33:55 +01003046 }
3047 bind(&regular_invoke);
3048 }
3049}
3050
Steve Block44f0eee2011-05-26 01:26:41 +01003051
Steve Block6ded16b2010-05-10 14:33:55 +01003052void MacroAssembler::InvokeCode(Register code,
3053 const ParameterCount& expected,
3054 const ParameterCount& actual,
Steve Block44f0eee2011-05-26 01:26:41 +01003055 InvokeFlag flag,
Ben Murdoch257744e2011-11-30 15:57:28 +00003056 const CallWrapper& call_wrapper,
3057 CallKind call_kind) {
Steve Block6ded16b2010-05-10 14:33:55 +01003058 Label done;
3059
Steve Block44f0eee2011-05-26 01:26:41 +01003060 InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
Ben Murdoch257744e2011-11-30 15:57:28 +00003061 call_wrapper, call_kind);
Steve Block6ded16b2010-05-10 14:33:55 +01003062 if (flag == CALL_FUNCTION) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003063 SetCallKind(t1, call_kind);
Steve Block6ded16b2010-05-10 14:33:55 +01003064 Call(code);
3065 } else {
3066 ASSERT(flag == JUMP_FUNCTION);
Ben Murdoch257744e2011-11-30 15:57:28 +00003067 SetCallKind(t1, call_kind);
Steve Block6ded16b2010-05-10 14:33:55 +01003068 Jump(code);
3069 }
3070 // Continue here if InvokePrologue does handle the invocation due to
3071 // mismatched parameter counts.
3072 bind(&done);
3073}
3074
3075
3076void MacroAssembler::InvokeCode(Handle<Code> code,
3077 const ParameterCount& expected,
3078 const ParameterCount& actual,
3079 RelocInfo::Mode rmode,
Ben Murdoch257744e2011-11-30 15:57:28 +00003080 InvokeFlag flag,
3081 CallKind call_kind) {
Steve Block6ded16b2010-05-10 14:33:55 +01003082 Label done;
3083
Ben Murdoch257744e2011-11-30 15:57:28 +00003084 InvokePrologue(expected, actual, code, no_reg, &done, flag,
3085 NullCallWrapper(), call_kind);
Steve Block6ded16b2010-05-10 14:33:55 +01003086 if (flag == CALL_FUNCTION) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003087 SetCallKind(t1, call_kind);
Steve Block6ded16b2010-05-10 14:33:55 +01003088 Call(code, rmode);
3089 } else {
Ben Murdoch257744e2011-11-30 15:57:28 +00003090 SetCallKind(t1, call_kind);
Steve Block6ded16b2010-05-10 14:33:55 +01003091 Jump(code, rmode);
3092 }
3093 // Continue here if InvokePrologue does handle the invocation due to
3094 // mismatched parameter counts.
3095 bind(&done);
3096}
3097
3098
3099void MacroAssembler::InvokeFunction(Register function,
3100 const ParameterCount& actual,
Steve Block44f0eee2011-05-26 01:26:41 +01003101 InvokeFlag flag,
Ben Murdoch257744e2011-11-30 15:57:28 +00003102 const CallWrapper& call_wrapper,
3103 CallKind call_kind) {
Steve Block6ded16b2010-05-10 14:33:55 +01003104 // Contract with called JS functions requires that function is passed in a1.
3105 ASSERT(function.is(a1));
3106 Register expected_reg = a2;
3107 Register code_reg = a3;
3108
3109 lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3110 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3111 lw(expected_reg,
3112 FieldMemOperand(code_reg,
3113 SharedFunctionInfo::kFormalParameterCountOffset));
Steve Block44f0eee2011-05-26 01:26:41 +01003114 sra(expected_reg, expected_reg, kSmiTagSize);
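  // The formal parameter count is stored as a Smi; shifting right by
  // kSmiTagSize untags it.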
3115 lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
Steve Block6ded16b2010-05-10 14:33:55 +01003116
3117 ParameterCount expected(expected_reg);
Ben Murdoch257744e2011-11-30 15:57:28 +00003118 InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
Steve Block44f0eee2011-05-26 01:26:41 +01003119}
3120
3121
3122void MacroAssembler::InvokeFunction(JSFunction* function,
3123 const ParameterCount& actual,
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003124 InvokeFlag flag,
3125 CallKind call_kind) {
Steve Block44f0eee2011-05-26 01:26:41 +01003126 ASSERT(function->is_compiled());
3127
3128 // Get the function and setup the context.
3129 li(a1, Operand(Handle<JSFunction>(function)));
3130 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3131
3132 // Invoke the cached code.
3133 Handle<Code> code(function->code());
3134 ParameterCount expected(function->shared()->formal_parameter_count());
3135 if (V8::UseCrankshaft()) {
3136 UNIMPLEMENTED_MIPS();
3137 } else {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003138 InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind);
Steve Block44f0eee2011-05-26 01:26:41 +01003139 }
3140}
3141
3142
3143void MacroAssembler::IsObjectJSObjectType(Register heap_object,
3144 Register map,
3145 Register scratch,
3146 Label* fail) {
3147 lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
3148 IsInstanceJSObjectType(map, scratch, fail);
3149}
3150
3151
3152void MacroAssembler::IsInstanceJSObjectType(Register map,
3153 Register scratch,
3154 Label* fail) {
3155 lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003156 Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
3157 Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
Steve Block44f0eee2011-05-26 01:26:41 +01003158}
3159
3160
3161void MacroAssembler::IsObjectJSStringType(Register object,
3162 Register scratch,
3163 Label* fail) {
3164 ASSERT(kNotStringTag != 0);
3165
3166 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3167 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3168 And(scratch, scratch, Operand(kIsNotStringMask));
3169 Branch(fail, ne, scratch, Operand(zero_reg));
Steve Block6ded16b2010-05-10 14:33:55 +01003170}
3171
3172
3173// ---------------------------------------------------------------------------
3174// Support functions.
3175
Steve Block44f0eee2011-05-26 01:26:41 +01003176
3177void MacroAssembler::TryGetFunctionPrototype(Register function,
3178 Register result,
3179 Register scratch,
3180 Label* miss) {
3181 // Check that the receiver isn't a smi.
3182 JumpIfSmi(function, miss);
3183
3184 // Check that the function really is a function. Load map into result reg.
3185 GetObjectType(function, result, scratch);
3186 Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
3187
3188 // Make sure that the function has an instance prototype.
3189 Label non_instance;
3190 lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
3191 And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
3192 Branch(&non_instance, ne, scratch, Operand(zero_reg));
3193
3194 // Get the prototype or initial map from the function.
3195 lw(result,
3196 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3197
3198 // If the prototype or initial map is the hole, don't return it and
3199 // simply miss the cache instead. This will allow us to allocate a
3200 // prototype object on-demand in the runtime system.
3201 LoadRoot(t8, Heap::kTheHoleValueRootIndex);
3202 Branch(miss, eq, result, Operand(t8));
3203
3204 // If the function does not have an initial map, we're done.
3205 Label done;
3206 GetObjectType(result, scratch, scratch);
3207 Branch(&done, ne, scratch, Operand(MAP_TYPE));
3208
3209 // Get the prototype from the initial map.
3210 lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
3211 jmp(&done);
3212
3213 // Non-instance prototype: Fetch prototype from constructor field
3214 // in initial map.
3215 bind(&non_instance);
3216 lw(result, FieldMemOperand(result, Map::kConstructorOffset));
3217
3218 // All done.
3219 bind(&done);
3220}
Steve Block6ded16b2010-05-10 14:33:55 +01003221
3222
Steve Block44f0eee2011-05-26 01:26:41 +01003223void MacroAssembler::GetObjectType(Register object,
3224 Register map,
3225 Register type_reg) {
3226 lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
3227 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3228}
Steve Block6ded16b2010-05-10 14:33:55 +01003229
3230
3231// -----------------------------------------------------------------------------
Ben Murdoch257744e2011-11-30 15:57:28 +00003232// Runtime calls.
Steve Block6ded16b2010-05-10 14:33:55 +01003233
void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
                              Register r1, const Operand& r2) {
  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2);
}


MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond,
                                         Register r1, const Operand& r2) {
  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
  Object* result;
  { MaybeObject* maybe_result = stub->TryGetCode();
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET,
       kNoASTId, cond, r1, r2);
  return result;
}


void MacroAssembler::TailCallStub(CodeStub* stub) {
  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}


MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub,
                                             Condition cond,
                                             Register r1,
                                             const Operand& r2) {
  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
  Object* result;
  { MaybeObject* maybe_result = stub->TryGetCode();
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2);
  return result;
}


static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  return ref0.address() - ref1.address();
}


MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
    ExternalReference function, int stack_space) {
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address();
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(),
      next_address);
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(),
      next_address);

  // Allocate HandleScope in callee-save registers.
  li(s3, Operand(next_address));
  lw(s0, MemOperand(s3, kNextOffset));
  lw(s1, MemOperand(s3, kLimitOffset));
  lw(s2, MemOperand(s3, kLevelOffset));
  Addu(s2, s2, Operand(1));
  sw(s2, MemOperand(s3, kLevelOffset));

  // The O32 ABI requires us to pass a pointer in a0 where the returned struct
  // (4 bytes) will be placed. This is also built into the Simulator.
  // Set up the pointer to the returned value (a0). It was allocated in
  // EnterExitFrame.
  addiu(a0, fp, ExitFrameConstants::kStackSpaceOffset);

  // The native call returns to the DirectCEntry stub, which redirects to the
  // return address pushed on the stack (it could have moved after GC).
  // The DirectCEntry stub itself is generated early and never moves.
  DirectCEntryStub stub;
  stub.GenerateCall(this, function);

  // As mentioned above, on MIPS a pointer is returned; we need to dereference
  // it to get the actual return value (which is also a pointer).
  lw(v0, MemOperand(v0));

  Label promote_scheduled_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;

  // If the result is non-zero, dereference it to get the result value;
  // otherwise set it to undefined.
  Label skip;
  LoadRoot(a0, Heap::kUndefinedValueRootIndex);
  Branch(&skip, eq, v0, Operand(zero_reg));
  lw(a0, MemOperand(v0));
  bind(&skip);
  mov(v0, a0);

  // No more valid handles (the result handle was the last one). Restore the
  // previous handle scope.
  sw(s0, MemOperand(s3, kNextOffset));
  if (emit_debug_code()) {
    lw(a1, MemOperand(s3, kLevelOffset));
    Check(eq, "Unexpected level after return from api call", a1, Operand(s2));
  }
  Subu(s2, s2, Operand(1));
  sw(s2, MemOperand(s3, kLevelOffset));
  lw(at, MemOperand(s3, kLimitOffset));
  Branch(&delete_allocated_handles, ne, s1, Operand(at));

  // Check if the function scheduled an exception.
  bind(&leave_exit_frame);
  LoadRoot(t0, Heap::kTheHoleValueRootIndex);
  li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
  lw(t1, MemOperand(at));
  Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
  li(s0, Operand(stack_space));
  LeaveExitFrame(false, s0);
  Ret();

  bind(&promote_scheduled_exception);
  MaybeObject* result = TryTailCallExternalReference(
      ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0, 1);
  if (result->IsFailure()) {
    return result;
  }

  // The HandleScope limit has changed. Delete the allocated extensions.
  bind(&delete_allocated_handles);
  sw(s1, MemOperand(s3, kLimitOffset));
  mov(s0, v0);
  mov(a0, v0);
  PrepareCallCFunction(1, s1);
  li(a0, Operand(ExternalReference::isolate_address()));
  CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
                1);
  mov(v0, s0);
  jmp(&leave_exit_frame);

  return result;
}
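
// Summary of the handle scope bookkeeping above: the isolate exposes three
// words (next, limit, level) starting at handle_scope_next_address.  The
// sequence saves next and limit and increments level before the call, then
// restores next and decrements level afterwards, and only calls
// delete_handle_scope_extensions() when the limit word changed, i.e. when the
// callee allocated handle scope extensions that now need to be freed.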


void MacroAssembler::IllegalOperation(int num_arguments) {
  if (num_arguments > 0) {
    addiu(sp, sp, num_arguments * kPointerSize);
  }
  LoadRoot(v0, Heap::kUndefinedValueRootIndex);
}


void MacroAssembler::IndexFromHash(Register hash,
                                   Register index) {
  // If the hash field contains an array index, pick it out. The assert checks
  // that the constants for the maximum number of digits for an array index
  // cached in the hash field and the number of bits reserved for it do not
  // conflict.
  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
         (1 << String::kArrayIndexValueBits));
  // We want the smi-tagged index in the index register. kArrayIndexValueMask
  // has zeros in the low kHashShift bits.
  STATIC_ASSERT(kSmiTag == 0);
  Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
  sll(index, hash, kSmiTagSize);
}
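
// Example: with kSmiTag == 0 and kSmiTagSize == 1, an extracted array index
// of 7 ends up as the raw word 14 (7 << 1), i.e. the smi 7, which is exactly
// what the final sll(index, hash, kSmiTagSize) produces.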


void MacroAssembler::ObjectToDoubleFPURegister(Register object,
                                               FPURegister result,
                                               Register scratch1,
                                               Register scratch2,
                                               Register heap_number_map,
                                               Label* not_number,
                                               ObjectToDoubleFlags flags) {
  Label done;
  if ((flags & OBJECT_NOT_SMI) == 0) {
    Label not_smi;
    JumpIfNotSmi(object, &not_smi);
    // Remove smi tag and convert to double.
    sra(scratch1, object, kSmiTagSize);
    mtc1(scratch1, result);
    cvt_d_w(result, result);
    Branch(&done);
    bind(&not_smi);
  }
  // Check for heap number and load double value from it.
  lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
  Branch(not_number, ne, scratch1, Operand(heap_number_map));

  if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
    // If exponent is all ones the number is either a NaN or +/-Infinity.
    Register exponent = scratch1;
    Register mask_reg = scratch2;
    lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
    li(mask_reg, HeapNumber::kExponentMask);

    And(exponent, exponent, mask_reg);
    Branch(not_number, eq, exponent, Operand(mask_reg));
  }
  ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
  bind(&done);
}


void MacroAssembler::SmiToDoubleFPURegister(Register smi,
                                            FPURegister value,
                                            Register scratch1) {
  sra(scratch1, smi, kSmiTagSize);
  mtc1(scratch1, value);
  cvt_d_w(value, value);
}
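
// Example: with kSmiTagSize == 1, the smi 5 is stored as the word 10; sra by
// kSmiTagSize recovers the integer 5, and mtc1/cvt_d_w then produce the
// double 5.0 in the destination FPU register.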


void MacroAssembler::AdduAndCheckForOverflow(Register dst,
                                             Register left,
                                             Register right,
                                             Register overflow_dst,
                                             Register scratch) {
  ASSERT(!dst.is(overflow_dst));
  ASSERT(!dst.is(scratch));
  ASSERT(!overflow_dst.is(scratch));
  ASSERT(!overflow_dst.is(left));
  ASSERT(!overflow_dst.is(right));
  ASSERT(!left.is(right));

  if (dst.is(left)) {
    mov(scratch, left);           // Preserve left.
    addu(dst, left, right);       // Left is overwritten.
    xor_(scratch, dst, scratch);  // Original left.
    xor_(overflow_dst, dst, right);
    and_(overflow_dst, overflow_dst, scratch);
  } else if (dst.is(right)) {
    mov(scratch, right);          // Preserve right.
    addu(dst, left, right);       // Right is overwritten.
    xor_(scratch, dst, scratch);  // Original right.
    xor_(overflow_dst, dst, left);
    and_(overflow_dst, overflow_dst, scratch);
  } else {
    addu(dst, left, right);
    xor_(overflow_dst, dst, left);
    xor_(scratch, dst, right);
    and_(overflow_dst, scratch, overflow_dst);
  }
}
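
// The sign-bit trick above: signed addition overflows exactly when both
// operands have the same sign and the sum's sign differs, so
// (dst ^ left) & (dst ^ right) has its sign bit set on overflow and
// overflow_dst ends up negative.  Worked example: left = right = 0x40000000
// gives dst = 0x80000000, (dst ^ left) = (dst ^ right) = 0xC0000000, and the
// AND is 0xC0000000, i.e. negative, flagging the overflow.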


void MacroAssembler::SubuAndCheckForOverflow(Register dst,
                                             Register left,
                                             Register right,
                                             Register overflow_dst,
                                             Register scratch) {
  ASSERT(!dst.is(overflow_dst));
  ASSERT(!dst.is(scratch));
  ASSERT(!overflow_dst.is(scratch));
  ASSERT(!overflow_dst.is(left));
  ASSERT(!overflow_dst.is(right));
  ASSERT(!left.is(right));
  ASSERT(!scratch.is(left));
  ASSERT(!scratch.is(right));

  if (dst.is(left)) {
    mov(scratch, left);                // Preserve left.
    subu(dst, left, right);            // Left is overwritten.
    xor_(overflow_dst, dst, scratch);  // scratch is the original left.
    xor_(scratch, scratch, right);     // Original left xor right.
    and_(overflow_dst, scratch, overflow_dst);
  } else if (dst.is(right)) {
    mov(scratch, right);               // Preserve right.
    subu(dst, left, right);            // Right is overwritten.
    xor_(overflow_dst, dst, left);
    xor_(scratch, left, scratch);      // Left xor the original right.
    and_(overflow_dst, scratch, overflow_dst);
  } else {
    subu(dst, left, right);
    xor_(overflow_dst, dst, left);
    xor_(scratch, left, right);
    and_(overflow_dst, scratch, overflow_dst);
  }
}
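
// For subtraction the rule is: overflow occurs exactly when the operands have
// different signs and the result's sign differs from left's, which is what
// (dst ^ left) & (left ^ right) tests.  Worked example: left = 0x7FFFFFFF,
// right = 0xFFFFFFFF (-1) gives dst = 0x80000000; (dst ^ left) = 0xFFFFFFFF,
// (left ^ right) = 0x80000000, and the AND is 0x80000000, i.e. negative.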


void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 int num_arguments) {
  // All parameters are on the stack.  v0 has the return value after the call.

  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments matches the
  // expectation.
  if (f->nargs >= 0 && f->nargs != num_arguments) {
    IllegalOperation(num_arguments);
    return;
  }

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  li(a0, num_arguments);
  li(a1, Operand(ExternalReference(f, isolate())));
  CEntryStub stub(1);
  CallStub(&stub);
}


void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
  const Runtime::Function* function = Runtime::FunctionForId(id);
  li(a0, Operand(function->nargs));
  li(a1, Operand(ExternalReference(function, isolate())));
  CEntryStub stub(1);
  stub.SaveDoubles();
  CallStub(&stub);
}


void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
  CallRuntime(Runtime::FunctionForId(fid), num_arguments);
}


void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments) {
  li(a0, Operand(num_arguments));
  li(a1, Operand(ext));

  CEntryStub stub(1);
  CallStub(&stub);
}


void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                               int num_arguments,
                                               int result_size) {
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  li(a0, Operand(num_arguments));
  JumpToExternalReference(ext);
}


MaybeObject* MacroAssembler::TryTailCallExternalReference(
    const ExternalReference& ext, int num_arguments, int result_size) {
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  li(a0, num_arguments);
  return TryJumpToExternalReference(ext);
}


void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                     int num_arguments,
                                     int result_size) {
  TailCallExternalReference(ExternalReference(fid, isolate()),
                            num_arguments,
                            result_size);
}


void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
  li(a1, Operand(builtin));
  CEntryStub stub(1);
  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}


MaybeObject* MacroAssembler::TryJumpToExternalReference(
    const ExternalReference& builtin) {
  li(a1, Operand(builtin));
  CEntryStub stub(1);
  return TryTailCallStub(&stub);
}


void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeFlag flag,
                                   const CallWrapper& call_wrapper) {
  GetBuiltinEntry(t9, id);
  if (flag == CALL_FUNCTION) {
    call_wrapper.BeforeCall(CallSize(t9));
    SetCallKind(t1, CALL_AS_METHOD);
    Call(t9);
    call_wrapper.AfterCall();
  } else {
    ASSERT(flag == JUMP_FUNCTION);
    SetCallKind(t1, CALL_AS_METHOD);
    Jump(t9);
  }
}


void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
  // Load the builtins object into the target register.
  lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
  // Load the JavaScript builtin function from the builtins object.
  lw(target, FieldMemOperand(target,
                             JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}


void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  ASSERT(!target.is(a1));
  GetBuiltinFunction(a1, id);
  // Load the code entry point from the builtin function.
  lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
}


void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                Register scratch1, Register scratch2) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    li(scratch1, Operand(value));
    li(scratch2, Operand(ExternalReference(counter)));
    sw(scratch1, MemOperand(scratch2));
  }
}


void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    li(scratch2, Operand(ExternalReference(counter)));
    lw(scratch1, MemOperand(scratch2));
    Addu(scratch1, scratch1, Operand(value));
    sw(scratch1, MemOperand(scratch2));
  }
}


void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    li(scratch2, Operand(ExternalReference(counter)));
    lw(scratch1, MemOperand(scratch2));
    Subu(scratch1, scratch1, Operand(value));
    sw(scratch1, MemOperand(scratch2));
  }
}


// -----------------------------------------------------------------------------
// Debugging.

void MacroAssembler::Assert(Condition cc, const char* msg,
                            Register rs, Operand rt) {
  if (emit_debug_code())
    Check(cc, msg, rs, rt);
}


void MacroAssembler::AssertRegisterIsRoot(Register reg,
                                          Heap::RootListIndex index) {
  if (emit_debug_code()) {
    LoadRoot(at, index);
    Check(eq, "Register did not match expected root", reg, Operand(at));
  }
}


void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    ASSERT(!elements.is(at));
    Label ok;
    push(elements);
    lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
    LoadRoot(at, Heap::kFixedArrayMapRootIndex);
    Branch(&ok, eq, elements, Operand(at));
    LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
    Branch(&ok, eq, elements, Operand(at));
    LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
    Branch(&ok, eq, elements, Operand(at));
    Abort("JSObject with fast elements map has slow elements");
    bind(&ok);
    pop(elements);
  }
}


void MacroAssembler::Check(Condition cc, const char* msg,
                           Register rs, Operand rt) {
  Label L;
  Branch(&L, cc, rs, rt);
  Abort(msg);
  // Will not return here.
  bind(&L);
}


void MacroAssembler::Abort(const char* msg) {
  Label abort_start;
  bind(&abort_start);
  // We want to pass the msg string like a smi to avoid GC problems; however,
  // msg is not guaranteed to be properly aligned. Instead, we pass an aligned
  // pointer that is a proper v8 smi, and also pass the alignment difference
  // from the real pointer as a smi.
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }
#endif
  // Disable stub call restrictions to always allow calls to abort.
  AllowStubCallsScope allow_scope(this, true);

  li(a0, Operand(p0));
  push(a0);
  li(a0, Operand(Smi::FromInt(p1 - p0)));
  push(a0);
  CallRuntime(Runtime::kAbort, 2);
  // Will not return here.
  if (is_trampoline_pool_blocked()) {
    // If the calling code cares about the exact number of instructions
    // generated, we insert padding here to keep the size of the Abort
    // macro constant.
    // Currently, in debug mode with debug_code enabled, the number of
    // generated instructions is 14, so we use this as a maximum value.
    static const int kExpectedAbortInstructions = 14;
    int abort_instructions = InstructionsGeneratedSince(&abort_start);
    ASSERT(abort_instructions <= kExpectedAbortInstructions);
    while (abort_instructions++ < kExpectedAbortInstructions) {
      nop();
    }
  }
}
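
// Example of the smi trick above: if msg happens to live at address
// 0x00404005, then p0 == 0x00404004 (the low tag bit is cleared, so it
// already looks like a smi) and the difference p1 - p0 == 1 is pushed
// separately as Smi::FromInt(1); adding the two back together recovers the
// original, possibly unaligned char pointer without ever exposing an
// untagged pointer to the GC.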


void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    for (int i = 1; i < context_chain_length; i++) {
      lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    }
  } else {
    // Slot is in the current function context. Move it into the
    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context in cp).
    Move(dst, cp);
  }
}


void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  // Load the global context from the global or builtins object.
  lw(function, FieldMemOperand(function,
                               GlobalObject::kGlobalContextOffset));
  // Load the function from the global context.
  lw(function, MemOperand(function, Context::SlotOffset(index)));
}


void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map,
                                                  Register scratch) {
  // Load the initial map. The global functions all have initial maps.
  lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
    Branch(&ok);
    bind(&fail);
    Abort("Global functions must have initial map");
    bind(&ok);
  }
}


void MacroAssembler::EnterFrame(StackFrame::Type type) {
  addiu(sp, sp, -5 * kPointerSize);
  li(t8, Operand(Smi::FromInt(type)));
  li(t9, Operand(CodeObject()));
  sw(ra, MemOperand(sp, 4 * kPointerSize));
  sw(fp, MemOperand(sp, 3 * kPointerSize));
  sw(cp, MemOperand(sp, 2 * kPointerSize));
  sw(t8, MemOperand(sp, 1 * kPointerSize));
  sw(t9, MemOperand(sp, 0 * kPointerSize));
  addiu(fp, sp, 3 * kPointerSize);
}


void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  mov(sp, fp);
  lw(fp, MemOperand(sp, 0 * kPointerSize));
  lw(ra, MemOperand(sp, 1 * kPointerSize));
  addiu(sp, sp, 2 * kPointerSize);
}

void MacroAssembler::EnterExitFrame(bool save_doubles,
                                    int stack_space) {
  // Set up the frame structure on the stack.
  STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
  STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
  STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);

  // This is how the stack will look:
  // fp + 2 (==kCallerSPDisplacement) - old stack's end
  // [fp + 1 (==kCallerPCOffset)] - saved old ra
  // [fp + 0 (==kCallerFPOffset)] - saved old fp
  // [fp - 1 (==kSPOffset)] - sp of the called function
  // [fp - 2 (==kCodeOffset)] - CodeObject
  // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
  //   new stack (will contain saved ra)

  // Save registers.
  addiu(sp, sp, -4 * kPointerSize);
  sw(ra, MemOperand(sp, 3 * kPointerSize));
  sw(fp, MemOperand(sp, 2 * kPointerSize));
  addiu(fp, sp, 2 * kPointerSize);  // Set up new frame pointer.

  if (emit_debug_code()) {
    sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
  }

  li(t8, Operand(CodeObject()));  // Accessed from ExitFrame::code_slot.
  sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));

  // Save the frame pointer and the context in top.
  li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
  sw(fp, MemOperand(t8));
  li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate())));
  sw(cp, MemOperand(t8));

  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  if (save_doubles) {
    // The stack must be aligned to 0 modulo 8 for stores with sdc1.
    ASSERT(kDoubleSize == frame_alignment);
    if (frame_alignment > 0) {
      ASSERT(IsPowerOf2(frame_alignment));
      And(sp, sp, Operand(-frame_alignment));  // Align stack.
    }
    int space = FPURegister::kNumRegisters * kDoubleSize;
    Subu(sp, sp, Operand(space));
    // Remember: we only need to save every 2nd double FPU value.
    for (int i = 0; i < FPURegister::kNumRegisters; i += 2) {
      FPURegister reg = FPURegister::from_code(i);
      sdc1(reg, MemOperand(sp, i * kDoubleSize));
    }
  }

  // Reserve space for the return address, the stack space and an optional slot
  // (used by the DirectCEntryStub to hold the return value if a struct is
  // returned), and align the frame in preparation for calling the runtime
  // function.
  ASSERT(stack_space >= 0);
  Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
  if (frame_alignment > 0) {
    ASSERT(IsPowerOf2(frame_alignment));
    And(sp, sp, Operand(-frame_alignment));  // Align stack.
  }

  // Set the exit frame sp value to point just before the return address
  // location.
  addiu(at, sp, kPointerSize);
  sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
}


void MacroAssembler::LeaveExitFrame(bool save_doubles,
                                    Register argument_count) {
  // Optionally restore all double registers.
  if (save_doubles) {
    // Remember: we only need to restore every 2nd double FPU value.
    lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
    for (int i = 0; i < FPURegister::kNumRegisters; i += 2) {
      FPURegister reg = FPURegister::from_code(i);
      ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
    }
  }

  // Clear top frame.
  li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
  sw(zero_reg, MemOperand(t8));

  // Restore current context from top and clear it in debug mode.
  li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate())));
  lw(cp, MemOperand(t8));
#ifdef DEBUG
  sw(a3, MemOperand(t8));
#endif

  // Pop the arguments, restore registers, and return.
  mov(sp, fp);  // Respect ABI stack constraint.
  lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
  lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
  addiu(sp, sp, 8);
  if (argument_count.is_valid()) {
    sll(t8, argument_count, kPointerSizeLog2);
    addu(sp, sp, t8);
  }
}


void MacroAssembler::InitializeNewString(Register string,
                                         Register length,
                                         Heap::RootListIndex map_index,
                                         Register scratch1,
                                         Register scratch2) {
  sll(scratch1, length, kSmiTagSize);
  LoadRoot(scratch2, map_index);
  sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
  li(scratch1, Operand(String::kEmptyHashField));
  sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
  sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
}


int MacroAssembler::ActivationFrameAlignment() {
#if defined(V8_HOST_ARCH_MIPS)
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one MIPS
  // platform for another MIPS platform with a different alignment.
  return OS::ActivationFrameAlignment();
#else  // defined(V8_HOST_ARCH_MIPS)
  // If we are using the simulator, then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots, we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif  // defined(V8_HOST_ARCH_MIPS)
}


void MacroAssembler::AssertStackIsAligned() {
  if (emit_debug_code()) {
    const int frame_alignment = ActivationFrameAlignment();
    const int frame_alignment_mask = frame_alignment - 1;

    if (frame_alignment > kPointerSize) {
      Label alignment_as_expected;
      ASSERT(IsPowerOf2(frame_alignment));
      andi(at, sp, frame_alignment_mask);
      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
      // Don't use Check here, as it will call Runtime_Abort, re-entering here.
      stop("Unexpected stack alignment");
      bind(&alignment_as_expected);
    }
  }
}


void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
    Register reg,
    Register scratch,
    Label* not_power_of_two_or_zero) {
  Subu(scratch, reg, Operand(1));
  Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
         scratch, Operand(zero_reg));
  and_(at, scratch, reg);  // In the delay slot.
  Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
}


void MacroAssembler::JumpIfNotBothSmi(Register reg1,
                                      Register reg2,
                                      Label* on_not_both_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  ASSERT_EQ(1, kSmiTagMask);
  or_(at, reg1, reg2);
  andi(at, at, kSmiTagMask);
  Branch(on_not_both_smi, ne, at, Operand(zero_reg));
}


void MacroAssembler::JumpIfEitherSmi(Register reg1,
                                     Register reg2,
                                     Label* on_either_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  ASSERT_EQ(1, kSmiTagMask);
  // The and_ result has its tag bit set only if both objects are non-smis
  // (a smi has tag bit 0).
  and_(at, reg1, reg2);
  andi(at, at, kSmiTagMask);
  Branch(on_either_smi, eq, at, Operand(zero_reg));
}
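
// Tag-bit check examples: a smi has its low bit clear, a heap pointer has it
// set.  In JumpIfNotBothSmi, or_ of the two words has bit 0 set if either
// operand is a heap object; in JumpIfEitherSmi, and_ has bit 0 clear if
// either operand is a smi.  E.g. reg1 = 6 (the smi 3) and reg2 = a heap
// pointer (low bit 1): or_ gives bit 0 == 1 (not both smis) while and_ gives
// bit 0 == 0 (at least one smi).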


void MacroAssembler::AbortIfSmi(Register object) {
  STATIC_ASSERT(kSmiTag == 0);
  andi(at, object, kSmiTagMask);
  Assert(ne, "Operand is a smi", at, Operand(zero_reg));
}


void MacroAssembler::AbortIfNotSmi(Register object) {
  STATIC_ASSERT(kSmiTag == 0);
  andi(at, object, kSmiTagMask);
  Assert(eq, "Operand is not a smi", at, Operand(zero_reg));
}


void MacroAssembler::AbortIfNotString(Register object) {
  STATIC_ASSERT(kSmiTag == 0);
  And(t0, object, Operand(kSmiTagMask));
  Assert(ne, "Operand is not a string", t0, Operand(zero_reg));
  push(object);
  lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
  lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
  Assert(lo, "Operand is not a string", object, Operand(FIRST_NONSTRING_TYPE));
  pop(object);
}


void MacroAssembler::AbortIfNotRootValue(Register src,
                                         Heap::RootListIndex root_value_index,
                                         const char* message) {
  ASSERT(!src.is(at));
  LoadRoot(at, root_value_index);
  Assert(eq, message, src, Operand(at));
}


void MacroAssembler::JumpIfNotHeapNumber(Register object,
                                         Register heap_number_map,
                                         Register scratch,
                                         Label* on_not_heap_number) {
  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
}


void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
    Register first,
    Register second,
    Register scratch1,
    Register scratch2,
    Label* failure) {
  // Test that both first and second are sequential ASCII strings.
  // Assume that they are non-smis.
  lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
  lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
  lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));

  JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
                                               scratch2,
                                               scratch1,
                                               scratch2,
                                               failure);
}


void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
                                                         Register second,
                                                         Register scratch1,
                                                         Register scratch2,
                                                         Label* failure) {
  // Check that neither is a smi.
  STATIC_ASSERT(kSmiTag == 0);
  And(scratch1, first, Operand(second));
  And(scratch1, scratch1, Operand(kSmiTagMask));
  Branch(failure, eq, scratch1, Operand(zero_reg));
  JumpIfNonSmisNotBothSequentialAsciiStrings(first,
                                             second,
                                             scratch1,
                                             scratch2,
                                             failure);
}


void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
    Register first,
    Register second,
    Register scratch1,
    Register scratch2,
    Label* failure) {
  int kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
  ASSERT(kFlatAsciiStringTag <= 0xffff);  // Must fit in a 16-bit immediate.
  andi(scratch1, first, kFlatAsciiStringMask);
  Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
  andi(scratch2, second, kFlatAsciiStringMask);
  Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
}


void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
                                                            Register scratch,
                                                            Label* failure) {
  int kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
  And(scratch, type, Operand(kFlatAsciiStringMask));
  Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
}

static const int kRegisterPassedArguments = 4;

void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
  int frame_alignment = ActivationFrameAlignment();

  // Up to four simple arguments are passed in registers a0..a3.
  // Those four arguments must have reserved argument slots on the stack for
  // mips, even though those argument slots are not normally used.
  // Remaining arguments are pushed on the stack, above (higher address than)
  // the argument slots.
  ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0);
  int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
                                0 : num_arguments - kRegisterPassedArguments) +
                               (StandardFrameConstants::kCArgsSlotsSize /
                                kPointerSize);
  if (frame_alignment > kPointerSize) {
    // Make stack end at alignment and make room for num_arguments - 4 words
    // and the original value of sp.
    mov(scratch, sp);
    Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
    ASSERT(IsPowerOf2(frame_alignment));
    And(sp, sp, Operand(-frame_alignment));
    sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}
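
// Example, assuming the usual four O32 argument slots (kCArgsSlotsSize ==
// 4 * kPointerSize): a call with num_arguments == 6 needs
// stack_passed_arguments == (6 - 4) + 4 == 6 words.  With 8-byte frame
// alignment, one extra word is reserved to remember the original sp, which
// CallCFunctionHelper reloads after the call.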


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  CallCFunctionHelper(no_reg, function, t8, num_arguments);
}


void MacroAssembler::CallCFunction(Register function,
                                   Register scratch,
                                   int num_arguments) {
  CallCFunctionHelper(function,
                      ExternalReference::the_hole_value_location(isolate()),
                      scratch,
                      num_arguments);
}


void MacroAssembler::CallCFunctionHelper(Register function,
                                         ExternalReference function_reference,
                                         Register scratch,
                                         int num_arguments) {
  // Make sure that the stack is aligned before calling a C function unless
  // running in the simulator. The simulator has its own alignment check which
  // provides more information.
  // The argument slots are presumed to have been set up by
  // PrepareCallCFunction. The C function must be called via t9, per the MIPS
  // ABI.

#if defined(V8_HOST_ARCH_MIPS)
  if (emit_debug_code()) {
    int frame_alignment = OS::ActivationFrameAlignment();
    int frame_alignment_mask = frame_alignment - 1;
    if (frame_alignment > kPointerSize) {
      ASSERT(IsPowerOf2(frame_alignment));
      Label alignment_as_expected;
      And(at, sp, Operand(frame_alignment_mask));
      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
      // Don't use Check here, as it will call Runtime_Abort, possibly
      // re-entering here.
      stop("Unexpected alignment in CallCFunction");
      bind(&alignment_as_expected);
    }
  }
#endif  // V8_HOST_ARCH_MIPS

  // Just call directly. The function called cannot cause a GC or allow
  // preemption, so the return address in the link register stays correct.

  if (function.is(no_reg)) {
    function = t9;
    li(function, Operand(function_reference));
  } else if (!function.is(t9)) {
    mov(t9, function);
    function = t9;
  }

  Call(function);

  ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0);
  int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
                                0 : num_arguments - kRegisterPassedArguments) +
                               (StandardFrameConstants::kCArgsSlotsSize /
                                kPointerSize);

  if (OS::ActivationFrameAlignment() > kPointerSize) {
    lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    Addu(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
  }
}


#undef BRANCH_ARGS_CHECK


void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  lw(descriptors,
     FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
  Label not_smi;
  JumpIfNotSmi(descriptors, &not_smi);
  li(descriptors, Operand(FACTORY->empty_descriptor_array()));
  bind(&not_smi);
}


CodePatcher::CodePatcher(byte* address, int instructions)
    : address_(address),
      instructions_(instructions),
      size_(instructions * Assembler::kInstrSize),
      masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate
  // size bytes of instructions without failing with buffer size constraints.
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  CPU::FlushICache(address_, size_);

  // Check that the code was patched as expected.
  ASSERT(masm_.pc_ == address_ + size_);
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
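
// Typical use (sketch): patch |instructions| instructions in place, e.g.
//   CodePatcher patcher(branch_address, 1);
//   patcher.ChangeBranchCondition(ne);
// The constructor points a MacroAssembler at the existing code and the
// destructor flushes the instruction cache for the patched range.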


void CodePatcher::Emit(Instr instr) {
  masm()->emit(instr);
}


void CodePatcher::Emit(Address addr) {
  masm()->emit(reinterpret_cast<Instr>(addr));
}


void CodePatcher::ChangeBranchCondition(Condition cond) {
  Instr instr = Assembler::instr_at(masm_.pc_);
  ASSERT(Assembler::IsBranch(instr));
  uint32_t opcode = Assembler::GetOpcodeField(instr);
  // Currently only the 'eq' and 'ne' cond values are supported, and only for
  // the simple branch instructions (those whose opcode is the branch type).
  // There are some special cases (see Assembler::IsBranch()), so extending
  // this would be tricky.
  ASSERT(opcode == BEQ ||
         opcode == BNE ||
         opcode == BLEZ ||
         opcode == BGTZ ||
         opcode == BEQL ||
         opcode == BNEL ||
         opcode == BLEZL ||
         opcode == BGTZL);
  opcode = (cond == eq) ? BEQ : BNE;
  instr = (instr & ~kOpcodeMask) | opcode;
  masm_.emit(instr);
}
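
// Example: patching beq into bne.  The 6-bit opcode field selects the branch
// type (BEQ is 000100 and BNE is 000101 on MIPS), so clearing kOpcodeMask and
// OR-ing in the new opcode turns
//   beq a0, a1, offset   into   bne a0, a1, offset
// while leaving the register fields and branch offset untouched.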


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS