// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <limits.h>  // For LONG_MIN, LONG_MAX.

#include "v8.h"

#if defined(V8_TARGET_ARCH_MIPS)

#include "bootstrapper.h"
#include "codegen.h"
#include "debug.h"
#include "runtime.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      allow_stub_calls_(true) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}

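// LoadRoot/StoreRoot address the heap root list through s6, which this port
// reserves as the root-array base register: each root is a single lw/sw at
// s6 + index * kPointerSize. In the conditional variants below, the
// Branch(2, NegateCondition(cond), ...) idiom jumps past the delay-slot nop
// and the single load/store when the condition does not hold.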
void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index) {
  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Condition cond,
                              Register src1, const Operand& src2) {
  Branch(2, NegateCondition(cond), src1, src2);
  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index) {
  sw(source, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index,
                               Condition cond,
                               Register src1, const Operand& src2) {
  Branch(2, NegateCondition(cond), src1, src2);
  sw(source, MemOperand(s6, index << kPointerSizeLog2));
}

void MacroAssembler::RecordWriteHelper(Register object,
                                       Register address,
                                       Register scratch) {
  if (emit_debug_code()) {
    // Check that the object is not in new space.
    Label not_in_new_space;
    InNewSpace(object, scratch, ne, &not_in_new_space);
    Abort("new-space object passed to RecordWriteHelper");
    bind(&not_in_new_space);
  }

  // Calculate page address: Clear bits from 0 to kPageSizeBits.
  if (mips32r2) {
    Ins(object, zero_reg, 0, kPageSizeBits);
  } else {
    // The Ins macro is slow on r1, so use shifts instead.
    srl(object, object, kPageSizeBits);
    sll(object, object, kPageSizeBits);
  }

  // Calculate region number.
  Ext(address, address, Page::kRegionSizeLog2,
      kPageSizeBits - Page::kRegionSizeLog2);

  // Mark region dirty.
  lw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
  li(at, Operand(1));
  sllv(at, at, address);
  or_(scratch, scratch, at);
  sw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
}
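
// The helper above implements the region-marking write barrier: clearing the
// low kPageSizeBits of 'object' yields the containing page's start address,
// Ext() pulls the (kPageSizeBits - kRegionSizeLog2)-bit region number out of
// 'address', and the li/sllv/or_/sw sequence sets that region's bit in the
// page's dirty-flag word. Illustrative example (assuming 8KB pages split
// into 256-byte regions): a slot 0x520 bytes into its page lands in region
// 5, so bit 5 of the dirty word is set.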

// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of kNumSafepointRegisters values on the
  // stack, so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  ASSERT(num_unsaved >= 0);
  Subu(sp, sp, Operand(num_unsaved * kPointerSize));
  MultiPush(kSafepointSavedRegisters);
}


void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  MultiPop(kSafepointSavedRegisters);
  Addu(sp, sp, Operand(num_unsaved * kPointerSize));
}


void MacroAssembler::PushSafepointRegistersAndDoubles() {
  PushSafepointRegisters();
  Subu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i += 2) {
    FPURegister reg = FPURegister::FromAllocationIndex(i);
    sdc1(reg, MemOperand(sp, i * kDoubleSize));
  }
}


void MacroAssembler::PopSafepointRegistersAndDoubles() {
  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i += 2) {
    FPURegister reg = FPURegister::FromAllocationIndex(i);
    ldc1(reg, MemOperand(sp, i * kDoubleSize));
  }
  Addu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
  PopSafepointRegisters();
}


void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
                                                             Register dst) {
  sw(src, SafepointRegistersAndDoublesSlot(dst));
}


void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  sw(src, SafepointRegisterSlot(dst));
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  lw(dst, SafepointRegisterSlot(src));
}


int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.
  return kSafepointRegisterStackIndexMap[reg_code];
}


MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}


MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
  // General purpose registers are pushed last on the stack.
  int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
  return MemOperand(sp, doubles_size + register_offset);
}
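
// Resulting stack layout (lowest address first): the doubles stored by
// PushSafepointRegistersAndDoubles, then the kSafepointSavedRegisters block,
// then the padding slots for the unsaved GP registers. That is why the GP
// slot lookup above biases the offset by doubles_size, while
// SafepointRegisterSlot (GP-only frames) does not.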


void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cc,
                                Label* branch) {
  ASSERT(cc == eq || cc == ne);
  And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
  Branch(branch, cc, scratch,
         Operand(ExternalReference::new_space_start(isolate())));
}


// Will clobber 4 registers: object, scratch0, scratch1, at. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
                                 Operand offset,
                                 Register scratch0,
                                 Register scratch1) {
  // The compiled code assumes that record write doesn't change the
  // context register, so we check that none of the clobbered
  // registers are cp.
  ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));

  Label done;

  // First, test that the object is not in the new space. We cannot set
  // region marks for new space pages.
  InNewSpace(object, scratch0, eq, &done);

  // Add offset into the object.
  Addu(scratch0, object, offset);

  // Record the actual write.
  RecordWriteHelper(object, scratch0, scratch1);

  bind(&done);

  // Clobber all input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(object, Operand(BitCast<int32_t>(kZapValue)));
    li(scratch0, Operand(BitCast<int32_t>(kZapValue)));
    li(scratch1, Operand(BitCast<int32_t>(kZapValue)));
  }
}


// Will clobber 4 registers: object, address, scratch, at. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
                                 Register address,
                                 Register scratch) {
  // The compiled code assumes that record write doesn't change the
  // context register, so we check that none of the clobbered
  // registers are cp.
  ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));

  Label done;

  // First, test that the object is not in the new space. We cannot set
  // region marks for new space pages.
  InNewSpace(object, scratch, eq, &done);

  // Record the actual write.
  RecordWriteHelper(object, address, scratch);

  bind(&done);

  // Clobber all input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(object, Operand(BitCast<int32_t>(kZapValue)));
    li(address, Operand(BitCast<int32_t>(kZapValue)));
    li(scratch, Operand(BitCast<int32_t>(kZapValue)));
  }
}


// -----------------------------------------------------------------------------
// Allocation support.


void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch,
                                            Label* miss) {
  Label same_contexts;

  ASSERT(!holder_reg.is(scratch));
  ASSERT(!holder_reg.is(at));
  ASSERT(!scratch.is(at));

  // Load current lexical context from the stack frame.
  lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
  // In debug mode, make sure the lexical context is set.
#ifdef DEBUG
  Check(ne, "we should not have an empty lexical context",
        scratch, Operand(zero_reg));
#endif

  // Load the global context of the current context.
  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
  lw(scratch, FieldMemOperand(scratch, offset));
  lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));

  // Check the context is a global context.
  if (emit_debug_code()) {
    // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
    push(holder_reg);  // Temporarily save holder on the stack.
    // Read the first word and compare to the global_context_map.
    lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
    LoadRoot(at, Heap::kGlobalContextMapRootIndex);
    Check(eq, "JSGlobalObject::global_context should be a global context.",
          holder_reg, Operand(at));
    pop(holder_reg);  // Restore holder.
  }

  // Check if both contexts are the same.
  lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
  Branch(&same_contexts, eq, scratch, Operand(at));

  // Check the context is a global context.
  if (emit_debug_code()) {
    // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
    push(holder_reg);  // Temporarily save holder on the stack.
    mov(holder_reg, at);  // Move 'at' to its holding place.
    LoadRoot(at, Heap::kNullValueRootIndex);
    Check(ne, "JSGlobalProxy::context() should not be null.",
          holder_reg, Operand(at));

    lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
    LoadRoot(at, Heap::kGlobalContextMapRootIndex);
    Check(eq, "JSGlobalObject::global_context should be a global context.",
          holder_reg, Operand(at));
    // Restoring 'at' is not needed. 'at' is reloaded below.
    pop(holder_reg);  // Restore holder.
    // Restore 'at' to holder's context.
    lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
  }

  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.
  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;

  lw(scratch, FieldMemOperand(scratch, token_offset));
  lw(at, FieldMemOperand(at, token_offset));
  Branch(miss, ne, scratch, Operand(at));

  bind(&same_contexts);
}


void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                              Register elements,
                                              Register key,
                                              Register result,
                                              Register reg0,
                                              Register reg1,
                                              Register reg2) {
  // Register use:
  //
  // elements - holds the slow-case elements of the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'key' or 'elements'.
  //            Unchanged on bailout so 'key' or 'elements' can be used
  //            in further computation.
  //
  // Scratch registers:
  //
  // reg0 - holds the untagged key on entry and holds the hash once computed.
  //
  // reg1 - used to hold the capacity mask of the dictionary.
  //
  // reg2 - used for the index into the dictionary.
  // at   - temporary (avoid MacroAssembler instructions also using 'at').
  Label done;

  // Compute the hash code from the untagged key. This must be kept in sync
  // with ComputeIntegerHash in utils.h.
  //
  // hash = ~hash + (hash << 15);
  nor(reg1, reg0, zero_reg);
  sll(at, reg0, 15);
  addu(reg0, reg1, at);

  // hash = hash ^ (hash >> 12);
  srl(at, reg0, 12);
  xor_(reg0, reg0, at);

  // hash = hash + (hash << 2);
  sll(at, reg0, 2);
  addu(reg0, reg0, at);

  // hash = hash ^ (hash >> 4);
  srl(at, reg0, 4);
  xor_(reg0, reg0, at);

  // hash = hash * 2057;
  li(reg1, Operand(2057));
  mul(reg0, reg0, reg1);

  // hash = hash ^ (hash >> 16);
  srl(at, reg0, 16);
  xor_(reg0, reg0, at);

  // Compute the capacity mask.
  lw(reg1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
  sra(reg1, reg1, kSmiTagSize);
  Subu(reg1, reg1, Operand(1));

  // Generate an unrolled loop that performs a few probes before giving up.
  static const int kProbes = 4;
  for (int i = 0; i < kProbes; i++) {
    // Use reg2 for index calculations and keep the hash intact in reg0.
    mov(reg2, reg0);
    // Compute the masked index: (hash + i + i * i) & mask.
    if (i > 0) {
      Addu(reg2, reg2, Operand(NumberDictionary::GetProbeOffset(i)));
    }
    and_(reg2, reg2, reg1);

    // Scale the index by multiplying by the element size.
    ASSERT(NumberDictionary::kEntrySize == 3);
    sll(at, reg2, 1);  // 2x.
    addu(reg2, reg2, at);  // reg2 = reg2 * 3.

    // Check if the key is identical to the name.
    sll(at, reg2, kPointerSizeLog2);
    addu(reg2, elements, at);

    lw(at, FieldMemOperand(reg2, NumberDictionary::kElementsStartOffset));
    if (i != kProbes - 1) {
      Branch(&done, eq, key, Operand(at));
    } else {
      Branch(miss, ne, key, Operand(at));
    }
  }

  bind(&done);
  // Check that the value is a normal property.
  // reg2: elements + (index * kPointerSize).
  const int kDetailsOffset =
      NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
  And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
  Branch(miss, ne, at, Operand(zero_reg));

  // Get the value at the masked, scaled index and return.
  const int kValueOffset =
      NumberDictionary::kElementsStartOffset + kPointerSize;
  lw(result, FieldMemOperand(reg2, kValueOffset));
}
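
// A NumberDictionary entry occupies kEntrySize == 3 words -- key, value,
// property details -- which is why the value and details are read above at
// kElementsStartOffset plus one and two words respectively, and why the
// probe index is scaled by 3. The hash is Thomas Wang's 32-bit integer mix
// and must stay in sync with ComputeIntegerHash in utils.h; the final probe
// of the unrolled loop branches straight to the miss label instead of
// falling through.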


// ---------------------------------------------------------------------------
// Instruction macros.

void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    addu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      addiu(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      addu(rd, rs, at);
    }
  }
}
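
// Addu and the other two-operand macros below share one pattern: if the
// operand is a plain immediate that fits the instruction's immediate field
// (int16 for arithmetic, uint16 for the logical ops), emit the immediate
// form; otherwise materialize the operand into 'at' via li() first -- which
// is why callers must not pass 'at' as the rs source register.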


void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    subu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      addiu(rd, rs, -rt.imm32_);  // No subiu instr, use addiu(x, y, -imm).
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      subu(rd, rs, at);
    }
  }
}


void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    mul(rd, rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    mul(rd, rs, at);
  }
}


void MacroAssembler::Mult(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    mult(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    mult(rs, at);
  }
}


void MacroAssembler::Multu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    multu(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    multu(rs, at);
  }
}


void MacroAssembler::Div(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    div(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    div(rs, at);
  }
}


void MacroAssembler::Divu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    divu(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    divu(rs, at);
  }
}


void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    and_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      andi(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      and_(rd, rs, at);
    }
  }
}


void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    or_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      ori(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      or_(rd, rs, at);
    }
  }
}


void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    xor_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      xori(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      xor_(rd, rs, at);
    }
  }
}


void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    nor(rd, rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    nor(rd, rs, at);
  }
}


void MacroAssembler::Neg(Register rs, const Operand& rt) {
  ASSERT(rt.is_reg());
  ASSERT(!at.is(rs));
  ASSERT(!at.is(rt.rm()));
  li(at, -1);
  xor_(rs, rt.rm(), at);
}


void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    slt(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      slti(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      slt(rd, rs, at);
    }
  }
}


void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    sltu(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      sltiu(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      sltu(rd, rs, at);
    }
  }
}


void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
  if (mips32r2) {
    if (rt.is_reg()) {
      rotrv(rd, rs, rt.rm());
    } else {
      rotr(rd, rs, rt.imm32_);
    }
  } else {
    if (rt.is_reg()) {
      subu(at, zero_reg, rt.rm());
      sllv(at, rs, at);
      srlv(rd, rs, rt.rm());
      or_(rd, rd, at);
    } else {
      if (rt.imm32_ == 0) {
        srl(rd, rs, 0);
      } else {
        srl(at, rs, rt.imm32_);
        sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
        or_(rd, rd, at);
      }
    }
  }
}
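
// On pre-R2 cores the rotate above is synthesized from the identity
//   ror(x, n) == (x >> n) | (x << (32 - n)),
// with 'at' holding the wrapped-around bits. The register variant negates
// the shift amount instead of computing 32 - n explicitly, which is
// equivalent because sllv/srlv only use the low five bits of the amount.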


//------------Pseudo-instructions-------------

void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
  ASSERT(!j.is_reg());
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (!MustUseReg(j.rmode_) && !gen2instr) {
    // Normal load of an immediate value which does not need relocation info.
    if (is_int16(j.imm32_)) {
      addiu(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & kHiMask)) {
      ori(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & kImm16Mask)) {
      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
    } else {
      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
      ori(rd, rd, (j.imm32_ & kImm16Mask));
    }
  } else if (MustUseReg(j.rmode_) || gen2instr) {
    if (MustUseReg(j.rmode_)) {
      RecordRelocInfo(j.rmode_, j.imm32_);
    }
    // We always need the same number of instructions as we may need to patch
    // this code to load another value which may need 2 instructions to load.
    lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
    ori(rd, rd, (j.imm32_ & kImm16Mask));
  }
}
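
// For example, li(t0, Operand(0x12345678)) without relocation info emits
//   lui t0, 0x1234
//   ori t0, t0, 0x5678
// while immediates with a zero high or low half collapse to a single
// addiu/ori/lui. A relocated operand (or gen2instr) always gets the full
// lui/ori pair so the target can later be patched with any 32-bit value.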


void MacroAssembler::MultiPush(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kPointerSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      sw(ToRegister(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPushReversed(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kPointerSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      sw(ToRegister(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPop(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      lw(ToRegister(i), MemOperand(sp, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  addiu(sp, sp, stack_offset);
}


void MacroAssembler::MultiPopReversed(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      lw(ToRegister(i), MemOperand(sp, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  addiu(sp, sp, stack_offset);
}


void MacroAssembler::MultiPushFPU(RegList regs) {
  CpuFeatures::Scope scope(FPU);
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kDoubleSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kDoubleSize;
      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPushReversedFPU(RegList regs) {
  CpuFeatures::Scope scope(FPU);
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kDoubleSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kDoubleSize;
      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPopFPU(RegList regs) {
  CpuFeatures::Scope scope(FPU);
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
  addiu(sp, sp, stack_offset);
}


void MacroAssembler::MultiPopReversedFPU(RegList regs) {
  CpuFeatures::Scope scope(FPU);
  int16_t stack_offset = 0;

  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
  addiu(sp, sp, stack_offset);
}


void MacroAssembler::Ext(Register rt,
                         Register rs,
                         uint16_t pos,
                         uint16_t size) {
  ASSERT(pos < 32);
  ASSERT(pos + size < 33);

  if (mips32r2) {
    ext_(rt, rs, pos, size);
  } else {
    // Move rs to rt and shift it left then right to get the
    // desired bitfield on the right side and zeroes on the left.
    int shift_left = 32 - (pos + size);
    sll(rt, rs, shift_left);  // Acts as a move if shift_left == 0.

    int shift_right = 32 - size;
    if (shift_right > 0) {
      srl(rt, rt, shift_right);
    }
  }
}
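
// Example for the pre-R2 path: Ext(v0, a0, 3, 4) emits sll(v0, a0, 25)
// followed by srl(v0, v0, 28), leaving bits [6:3] of a0 in the low four
// bits of v0 with zeroes above.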


void MacroAssembler::Ins(Register rt,
                         Register rs,
                         uint16_t pos,
                         uint16_t size) {
  ASSERT(pos < 32);
  ASSERT(pos + size < 32);

  if (mips32r2) {
    ins_(rt, rs, pos, size);
  } else {
    ASSERT(!rt.is(t8) && !rs.is(t8));

    srl(t8, rt, pos + size);
    // The left chunk from rt that needs to
    // be saved is on the right side of t8.
    sll(at, t8, pos + size);
    // The 'at' register now contains the left chunk on
    // the left (proper position) and zeroes.
    sll(t8, rt, 32 - pos);
    // t8 now contains the right chunk on the left and zeroes.
    srl(t8, t8, 32 - pos);
    // t8 now contains the right chunk on
    // the right (proper position) and zeroes.
    or_(rt, at, t8);
    // rt now contains the left and right chunks from the original rt
    // in their proper position and zeroes in the middle.
    sll(t8, rs, 32 - size);
    // t8 now contains the chunk from rs on the left and zeroes.
    srl(t8, t8, 32 - size - pos);
    // t8 now contains the original chunk from rs in
    // the middle (proper position).
    or_(rt, rt, t8);
    // rt now contains the result of the ins instruction in R2 mode.
  }
}


void MacroAssembler::Cvt_d_uw(FPURegister fd,
                              FPURegister fs,
                              FPURegister scratch) {
  // Move the data from fs to t8.
  mfc1(t8, fs);
  Cvt_d_uw(fd, t8, scratch);
}


void MacroAssembler::Cvt_d_uw(FPURegister fd,
                              Register rs,
                              FPURegister scratch) {
  // Convert rs to a FP value in fd (and fd + 1).
  // We do this by converting rs minus the MSB to avoid sign conversion,
  // then adding 2^31 to the result (if needed).

  ASSERT(!fd.is(scratch));
  ASSERT(!rs.is(t9));
  ASSERT(!rs.is(at));

  // Save rs's MSB to t9.
  Ext(t9, rs, 31, 1);
  // Remove rs's MSB.
  Ext(at, rs, 0, 31);
  // Move the result to fd.
  mtc1(at, fd);

  // Convert fd to a real FP value.
  cvt_d_w(fd, fd);

  Label conversion_done;

  // If rs's MSB was 0, it's done.
  // Otherwise we need to add that to the FP register.
  Branch(&conversion_done, eq, t9, Operand(zero_reg));

  // Load 2^31 into scratch as its double representation.
  li(at, 0x41E00000);
  mtc1(at, FPURegister::from_code(scratch.code() + 1));
  mtc1(zero_reg, scratch);
  // Add it to fd.
  add_d(fd, fd, scratch);

  bind(&conversion_done);
}
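
// 0x41E00000 is the high word of the IEEE-754 double 2^31: sign 0, biased
// exponent 1023 + 31 = 0x41E, zero mantissa. Writing it to the odd register
// of the scratch pair and zero to the even one materializes 2^31.0 without
// a memory load; Trunc_uw_d below uses the same trick.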


void MacroAssembler::Trunc_uw_d(FPURegister fd,
                                FPURegister fs,
                                FPURegister scratch) {
  Trunc_uw_d(fs, t8, scratch);
  mtc1(t8, fd);
}


void MacroAssembler::Trunc_uw_d(FPURegister fd,
                                Register rs,
                                FPURegister scratch) {
  ASSERT(!fd.is(scratch));
  ASSERT(!rs.is(at));

  // Load 2^31 into scratch as its double representation.
  li(at, 0x41E00000);
  mtc1(at, FPURegister::from_code(scratch.code() + 1));
  mtc1(zero_reg, scratch);
  // Test if scratch > fd.
  c(OLT, D, fd, scratch);

  Label simple_convert;
  // If fd < 2^31 we can convert it normally.
  bc1t(&simple_convert);

  // First we subtract 2^31 from fd, then trunc it to rs
  // and add 2^31 to rs.
  sub_d(scratch, fd, scratch);
  trunc_w_d(scratch, scratch);
  mfc1(rs, scratch);
  Or(rs, rs, 1 << 31);

  Label done;
  Branch(&done);
  // Simple conversion.
  bind(&simple_convert);
  trunc_w_d(scratch, fd);
  mfc1(rs, scratch);

  bind(&done);
}


// Tries to get a signed int32 out of a double precision floating point heap
// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
// 32-bit signed integer range.
// This method implementation differs from the ARM version for performance
// reasons.
void MacroAssembler::ConvertToInt32(Register source,
                                    Register dest,
                                    Register scratch,
                                    Register scratch2,
                                    FPURegister double_scratch,
                                    Label* not_int32) {
  Label right_exponent, done;
  // Get exponent word (ENDIAN issues).
  lw(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
  // Get exponent alone in scratch2.
  And(scratch2, scratch, Operand(HeapNumber::kExponentMask));
  // Load dest with zero. We use this either for the final shift or
  // for the answer.
  mov(dest, zero_reg);
  // Check whether the exponent matches a 32-bit signed int that is not a Smi.
  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
  // the exponent that we are fastest at and also the highest exponent we can
  // handle here.
  const uint32_t non_smi_exponent =
      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
  // If we have a match of the int32-but-not-Smi exponent then skip some logic.
  Branch(&right_exponent, eq, scratch2, Operand(non_smi_exponent));
  // If the exponent is higher than that then go to not_int32 case. This
  // catches numbers that don't fit in a signed int32, infinities and NaNs.
  Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));

  // We know the exponent is smaller than 30 (biased). If it is less than
  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
  // it rounds to zero.
  const uint32_t zero_exponent =
      (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
  Subu(scratch2, scratch2, Operand(zero_exponent));
  // Dest already has a Smi zero.
  Branch(&done, lt, scratch2, Operand(zero_reg));
  if (!CpuFeatures::IsSupported(FPU)) {
    // We have a shifted exponent between 0 and 30 in scratch2.
    srl(dest, scratch2, HeapNumber::kExponentShift);
    // We now have the exponent in dest. Subtract from 30 to get
    // how much to shift down.
    li(at, Operand(30));
    subu(dest, at, dest);
  }
  bind(&right_exponent);
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // MIPS FPU instructions implementing double precision to integer
    // conversion using round to zero. Since the FP value was qualified
    // above, the resulting integer should be a legal int32.
    // The original 'Exponent' word is still in scratch.
    lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
    mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
    trunc_w_d(double_scratch, double_scratch);
    mfc1(dest, double_scratch);
  } else {
    // On entry, dest has final downshift, scratch has original sign/exp/mant.
    // Save sign bit in top bit of dest.
    And(scratch2, scratch, Operand(0x80000000));
    Or(dest, dest, Operand(scratch2));
    // Put back the implicit 1, just above mantissa field.
    Or(scratch, scratch, Operand(1 << HeapNumber::kExponentShift));

    // Shift up the mantissa bits to take up the space the exponent used to
    // take. We just orred in the implicit bit so that took care of one and
    // we want to leave the sign bit 0 so we subtract 2 bits from the shift
    // distance. But we want to clear the sign-bit so shift one more bit
    // left, then shift right one bit.
    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
    sll(scratch, scratch, shift_distance + 1);
    srl(scratch, scratch, 1);

    // Get the second half of the double. For some exponents we don't
    // actually need this because the bits get shifted out again, but
    // it's probably slower to test than just to do it.
    lw(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
    // Extract the top 10 bits, and insert those bottom 10 bits of scratch.
    // The width of the field here is the same as the shift amount above.
    const int field_width = shift_distance;
    Ext(scratch2, scratch2, 32 - shift_distance, field_width);
    Ins(scratch, scratch2, 0, field_width);
    // Move down according to the exponent.
    srlv(scratch, scratch, dest);
    // Prepare the negative version of our integer.
    subu(scratch2, zero_reg, scratch);
    // Trick to check sign bit (msb) held in dest, count leading zero.
    // 0 indicates negative, save negative version with conditional move.
    clz(dest, dest);
    movz(scratch, scratch2, dest);
    mov(dest, scratch);
  }
  bind(&done);
}


void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
                                                 Register input_high,
                                                 Register input_low,
                                                 Register scratch) {
  Label done, normal_exponent, restore_sign;
  // Extract the biased exponent in result.
  Ext(result,
      input_high,
      HeapNumber::kExponentShift,
      HeapNumber::kExponentBits);

  // Check for Infinity and NaNs, which should return 0.
  Subu(scratch, result, HeapNumber::kExponentMask);
  movz(result, zero_reg, scratch);
  Branch(&done, eq, scratch, Operand(zero_reg));

  // Express exponent as delta to (number of mantissa bits + 31).
  Subu(result,
       result,
       Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));

  // If the delta is strictly positive, all bits would be shifted away,
  // which means that we can return 0.
  Branch(&normal_exponent, le, result, Operand(zero_reg));
  mov(result, zero_reg);
  Branch(&done);

  bind(&normal_exponent);
  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
  // Calculate shift.
  Addu(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits));

  // Save the sign.
  Register sign = result;
  result = no_reg;
  And(sign, input_high, Operand(HeapNumber::kSignMask));

  // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
  // to check for this specific case.
  Label high_shift_needed, high_shift_done;
  Branch(&high_shift_needed, lt, scratch, Operand(32));
  mov(input_high, zero_reg);
  Branch(&high_shift_done);
  bind(&high_shift_needed);

  // Set the implicit 1 before the mantissa part in input_high.
  Or(input_high,
     input_high,
     Operand(1 << HeapNumber::kMantissaBitsInTopWord));
  // Shift the mantissa bits to the correct position.
  // We don't need to clear non-mantissa bits as they will be shifted away.
  // If they weren't, it would mean that the answer is in the 32-bit range.
  sllv(input_high, input_high, scratch);

  bind(&high_shift_done);

  // Replace the shifted bits with bits from the lower mantissa word.
  Label pos_shift, shift_done;
  li(at, 32);
  subu(scratch, at, scratch);
  Branch(&pos_shift, ge, scratch, Operand(zero_reg));

  // Negate scratch.
  Subu(scratch, zero_reg, scratch);
  sllv(input_low, input_low, scratch);
  Branch(&shift_done);

  bind(&pos_shift);
  srlv(input_low, input_low, scratch);

  bind(&shift_done);
  Or(input_high, input_high, Operand(input_low));
  // Restore sign if necessary.
  mov(scratch, sign);
  result = sign;
  sign = no_reg;
  Subu(result, zero_reg, input_high);
  movz(result, input_high, scratch);
  bind(&done);
}
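
// In short: the routine rebuilds the low 32 bits of the truncated value
// entirely with integer shifts -- the mantissa (implicit 1 included) is
// shifted according to exponent - (kMantissaBits + 31), bits from the low
// mantissa word are spliced in, and the sign is reapplied with the final
// movz-selected negation. This covers exactly the values the FPU cannot
// truncate because they fall outside the int32 range.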


void MacroAssembler::EmitECMATruncate(Register result,
                                      FPURegister double_input,
                                      FPURegister single_scratch,
                                      Register scratch,
                                      Register input_high,
                                      Register input_low) {
  CpuFeatures::Scope scope(FPU);
  ASSERT(!input_high.is(result));
  ASSERT(!input_low.is(result));
  ASSERT(!input_low.is(input_high));
  ASSERT(!scratch.is(result) &&
         !scratch.is(input_high) &&
         !scratch.is(input_low));
  ASSERT(!single_scratch.is(double_input));

  Label done;
  Label manual;

  // Clear cumulative exception flags and save the FCSR.
  Register scratch2 = input_high;
  cfc1(scratch2, FCSR);
  ctc1(zero_reg, FCSR);
  // Try a conversion to a signed integer.
  trunc_w_d(single_scratch, double_input);
  mfc1(result, single_scratch);
  // Retrieve and restore the FCSR.
  cfc1(scratch, FCSR);
  ctc1(scratch2, FCSR);
  // Check for overflow and NaNs.
  And(scratch,
      scratch,
      kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
  // If we had no exceptions we are done.
  Branch(&done, eq, scratch, Operand(zero_reg));

  // Load the double value and perform a manual truncation.
  Move(input_low, input_high, double_input);
  EmitOutOfInt32RangeTruncate(result,
                              input_high,
                              input_low,
                              scratch);
  bind(&done);
}
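
// The fast path relies on the FCSR: with the flags cleared first, trunc_w_d
// sets one of the checked flag bits (invalid operation, overflow, underflow)
// exactly when the conversion result is unusable -- a NaN input or a value
// outside the int32 range -- so a zero flag word means 'result' already
// holds the correct truncation and the manual path is skipped.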


void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                         Register src,
                                         int num_least_bits) {
  Ext(dst, src, kSmiTagSize, num_least_bits);
}


void MacroAssembler::GetLeastBitsFromInt32(Register dst,
                                           Register src,
                                           int num_least_bits) {
  And(dst, src, Operand((1 << num_least_bits) - 1));
}


// Emulated conditional branches do not emit a nop in the branch delay slot.
//
// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
    (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
    (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))


bool MacroAssembler::UseAbsoluteCodePointers() {
  return is_trampoline_emitted();
}
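
// Once the assembler has emitted a trampoline pool, a label may no longer be
// reachable with a single 16-bit (word-scaled) branch offset, so the Branch()
// overloads below fall back to an absolute jump (Jr) for far labels and keep
// the short PC-relative forms otherwise.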


void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
  BranchShort(offset, bdslot);
}


void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
                            const Operand& rt,
                            BranchDelaySlot bdslot) {
  BranchShort(offset, cond, rs, rt, bdslot);
}


void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
  bool is_label_near = is_near(L);
  if (UseAbsoluteCodePointers() && !is_label_near) {
    Jr(L, bdslot);
  } else {
    BranchShort(L, bdslot);
  }
}


void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
                            const Operand& rt,
                            BranchDelaySlot bdslot) {
  bool is_label_near = is_near(L);
  if (UseAbsoluteCodePointers() && !is_label_near) {
    Label skip;
    Condition neg_cond = NegateCondition(cond);
    BranchShort(&skip, neg_cond, rs, rt);
    Jr(L, bdslot);
    bind(&skip);
  } else {
    BranchShort(L, cond, rs, rt, bdslot);
  }
}


void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
  b(offset);

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}


void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
                                 const Operand& rt,
                                 BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);
  ASSERT(!rs.is(zero_reg));
  Register r2 = no_reg;
  Register scratch = at;

  if (rt.is_reg()) {
    // We don't want any other register but scratch clobbered.
    ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
    r2 = rt.rm_;
    switch (cond) {
      case cc_always:
        b(offset);
        break;
      case eq:
        beq(rs, r2, offset);
        break;
      case ne:
        bne(rs, r2, offset);
        break;
      // Signed comparison.
      case greater:
        if (r2.is(zero_reg)) {
          bgtz(rs, offset);
        } else {
          slt(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (r2.is(zero_reg)) {
          bgez(rs, offset);
        } else {
          slt(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (r2.is(zero_reg)) {
          bltz(rs, offset);
        } else {
          slt(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (r2.is(zero_reg)) {
          blez(rs, offset);
        } else {
          slt(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (r2.is(zero_reg)) {
          bgtz(rs, offset);
        } else {
          sltu(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (r2.is(zero_reg)) {
          bgez(rs, offset);
        } else {
          sltu(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (r2.is(zero_reg)) {
          // No code needs to be emitted.
          return;
        } else {
          sltu(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (r2.is(zero_reg)) {
          b(offset);
        } else {
          sltu(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  } else {
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching the
    // target.
    switch (cond) {
      case cc_always:
        b(offset);
        break;
      case eq:
        // We don't want any other register but scratch clobbered.
        ASSERT(!scratch.is(rs));
        r2 = scratch;
        li(r2, rt);
        beq(rs, r2, offset);
        break;
      case ne:
        // We don't want any other register but scratch clobbered.
        ASSERT(!scratch.is(rs));
        r2 = scratch;
        li(r2, rt);
        bne(rs, r2, offset);
        break;
      // Signed comparison.
      case greater:
        if (rt.imm32_ == 0) {
          bgtz(rs, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (rt.imm32_ == 0) {
          bgez(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          beq(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (rt.imm32_ == 0) {
          bltz(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          bne(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (rt.imm32_ == 0) {
          blez(rs, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (rt.imm32_ == 0) {
          bgtz(rs, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (rt.imm32_ == 0) {
          bgez(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          beq(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (rt.imm32_ == 0) {
          // No code needs to be emitted.
          return;
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          bne(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (rt.imm32_ == 0) {
          b(offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  }
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}


void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
  // We use shifted_branch_offset as an argument for the branch instructions
  // to be sure it is called just before generating the branch instruction,
  // as needed.

  b(shifted_branch_offset(L, false));

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}
1499
1500
void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
                                 const Operand& rt,
                                 BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);

  int32_t offset;
  Register r2 = no_reg;
  Register scratch = at;
  if (rt.is_reg()) {
    r2 = rt.rm_;
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching the
    // target.
    switch (cond) {
      case cc_always:
        offset = shifted_branch_offset(L, false);
        b(offset);
        break;
      case eq:
        offset = shifted_branch_offset(L, false);
        beq(rs, r2, offset);
        break;
      case ne:
        offset = shifted_branch_offset(L, false);
        bne(rs, r2, offset);
        break;
      // Signed comparison.
      case greater:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bgtz(rs, offset);
        } else {
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bgez(rs, offset);
        } else {
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bltz(rs, offset);
        } else {
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          blez(rs, offset);
        } else {
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bgtz(rs, offset);
        } else {
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bgez(rs, offset);
        } else {
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (r2.is(zero_reg)) {
          // No code needs to be emitted.
          return;
        } else {
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          b(offset);
        } else {
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  } else {
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching the
    // target.
    switch (cond) {
      case cc_always:
        offset = shifted_branch_offset(L, false);
        b(offset);
        break;
      case eq:
        ASSERT(!scratch.is(rs));
        r2 = scratch;
        li(r2, rt);
        offset = shifted_branch_offset(L, false);
        beq(rs, r2, offset);
        break;
      case ne:
        ASSERT(!scratch.is(rs));
        r2 = scratch;
        li(r2, rt);
        offset = shifted_branch_offset(L, false);
        bne(rs, r2, offset);
        break;
      // Signed comparison.
      case greater:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bgtz(rs, offset);
        } else {
          ASSERT(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bgez(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        } else {
          ASSERT(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bltz(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        } else {
          ASSERT(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          blez(rs, offset);
        } else {
          ASSERT(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bgtz(rs, offset);
        } else {
          ASSERT(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bgez(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        } else {
          ASSERT(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (rt.imm32_ == 0) {
          // No code needs to be emitted.
          return;
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        } else {
          ASSERT(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          b(offset);
        } else {
          ASSERT(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  }
  // Check that offset actually fits in an int16_t.
  ASSERT(is_int16(offset));
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}
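

// A worked example of the code above (illustration only, not emitted here):
// for a signed comparison against an immediate that fits in 16 bits,
//   BranchShort(&target, greater_equal, rs, Operand(1234), PROTECT)
// expands to:
//   slti at, rs, 1234          (at = 1 if rs < 1234, else 0)
//   beq  at, zero_reg, offset  (taken when rs >= 1234)
//   nop                        (delay slot, since bdslot == PROTECT)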


void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
  BranchAndLinkShort(offset, bdslot);
}


void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
                                   const Operand& rt,
                                   BranchDelaySlot bdslot) {
  BranchAndLinkShort(offset, cond, rs, rt, bdslot);
}


void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
  bool is_label_near = is_near(L);
  if (UseAbsoluteCodePointers() && !is_label_near) {
    Jalr(L, bdslot);
  } else {
    BranchAndLinkShort(L, bdslot);
  }
}


void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
                                   const Operand& rt,
                                   BranchDelaySlot bdslot) {
  bool is_label_near = is_near(L);
  if (UseAbsoluteCodePointers() && !is_label_near) {
    Label skip;
    Condition neg_cond = NegateCondition(cond);
    BranchShort(&skip, neg_cond, rs, rt);
    Jalr(L, bdslot);
    bind(&skip);
  } else {
    BranchAndLinkShort(L, cond, rs, rt, bdslot);
  }
}


// We need to use a bgezal or bltzal, but they can't be used directly with the
// slt instructions. We could use sub or add instead but we would miss overflow
// cases, so we keep slt and add an intermediate third instruction.
void MacroAssembler::BranchAndLinkShort(int16_t offset,
                                        BranchDelaySlot bdslot) {
  bal(offset);

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}


void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
                                        Register rs, const Operand& rt,
                                        BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);
  Register r2 = no_reg;
  Register scratch = at;

  if (rt.is_reg()) {
    r2 = rt.rm_;
  } else if (cond != cc_always) {
    r2 = scratch;
    li(r2, rt);
  }

  switch (cond) {
    case cc_always:
      bal(offset);
      break;
    case eq:
      bne(rs, r2, 2);
      nop();
      bal(offset);
      break;
    case ne:
      beq(rs, r2, 2);
      nop();
      bal(offset);
      break;

    // Signed comparison.
    case greater:
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      break;
    case greater_equal:
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      break;
    case less:
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      break;
    case less_equal:
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      break;

    // Unsigned comparison.
    case Ugreater:
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      break;
    case Ugreater_equal:
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      break;
    case Uless:
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      break;
    case Uless_equal:
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      break;

    default:
      UNREACHABLE();
  }
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}
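

// A worked illustration of the pattern above (comment only): for
// BranchAndLinkShort(offset, greater, rs, Operand(r2)) the emitted code is
//   slt    at, r2, rs    (at = 1 when rs > r2, else 0)
//   addiu  at, at, -1    (at = 0 when the condition holds, -1 otherwise)
//   bgezal at, offset    (branch-and-link taken only when at == 0)
// so bgezal/bltzal only ever test the sign bit that the extra addiu produces.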


void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
  bal(shifted_branch_offset(L, false));

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}


void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
                                        const Operand& rt,
                                        BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);

  int32_t offset;
  Register r2 = no_reg;
  Register scratch = at;
  if (rt.is_reg()) {
    r2 = rt.rm_;
  } else if (cond != cc_always) {
    r2 = scratch;
    li(r2, rt);
  }

  switch (cond) {
    case cc_always:
      offset = shifted_branch_offset(L, false);
      bal(offset);
      break;
    case eq:
      bne(rs, r2, 2);
      nop();
      offset = shifted_branch_offset(L, false);
      bal(offset);
      break;
    case ne:
      beq(rs, r2, 2);
      nop();
      offset = shifted_branch_offset(L, false);
      bal(offset);
      break;

    // Signed comparison.
    case greater:
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bgezal(scratch, offset);
      break;
    case greater_equal:
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bltzal(scratch, offset);
      break;
    case less:
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bgezal(scratch, offset);
      break;
    case less_equal:
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bltzal(scratch, offset);
      break;

    // Unsigned comparison.
    case Ugreater:
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bgezal(scratch, offset);
      break;
    case Ugreater_equal:
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bltzal(scratch, offset);
      break;
    case Uless:
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bgezal(scratch, offset);
      break;
    case Uless_equal:
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bltzal(scratch, offset);
      break;

    default:
      UNREACHABLE();
  }

  // Check that offset actually fits in an int16_t.
  ASSERT(is_int16(offset));

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}


void MacroAssembler::Jump(Register target,
                          Condition cond,
                          Register rs,
                          const Operand& rt,
                          BranchDelaySlot bd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (cond == cc_always) {
    jr(target);
  } else {
    BRANCH_ARGS_CHECK(cond, rs, rt);
    Branch(2, NegateCondition(cond), rs, rt);
    jr(target);
  }
  // Emit a nop in the branch delay slot if required.
  if (bd == PROTECT)
    nop();
}


void MacroAssembler::Jump(intptr_t target,
                          RelocInfo::Mode rmode,
                          Condition cond,
                          Register rs,
                          const Operand& rt,
                          BranchDelaySlot bd) {
  li(t9, Operand(target, rmode));
  Jump(t9, cond, rs, rt, bd);
}


void MacroAssembler::Jump(Address target,
                          RelocInfo::Mode rmode,
                          Condition cond,
                          Register rs,
                          const Operand& rt,
                          BranchDelaySlot bd) {
  ASSERT(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
}


void MacroAssembler::Jump(Handle<Code> code,
                          RelocInfo::Mode rmode,
                          Condition cond,
                          Register rs,
                          const Operand& rt,
                          BranchDelaySlot bd) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
}


int MacroAssembler::CallSize(Register target,
                             Condition cond,
                             Register rs,
                             const Operand& rt,
                             BranchDelaySlot bd) {
  int size = 0;

  if (cond == cc_always) {
    size += 1;
  } else {
    size += 3;
  }

  if (bd == PROTECT)
    size += 1;

  return size * kInstrSize;
}
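

// For instance (illustration only): an unconditional call with the default
// PROTECT delay slot is (1 + 1) * kInstrSize = 8 bytes (jalr + nop), while a
// conditional one is (3 + 1) * kInstrSize = 16 bytes (branch-over + its delay
// nop + jalr + nop), matching the sequences emitted by Call below.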


// Note: To call gcc-compiled C code on mips, you must call through t9.
void MacroAssembler::Call(Register target,
                          Condition cond,
                          Register rs,
                          const Operand& rt,
                          BranchDelaySlot bd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Label start;
  bind(&start);
  if (cond == cc_always) {
    jalr(target);
  } else {
    BRANCH_ARGS_CHECK(cond, rs, rt);
    Branch(2, NegateCondition(cond), rs, rt);
    jalr(target);
  }
  // Emit a nop in the branch delay slot if required.
  if (bd == PROTECT)
    nop();

  ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
            SizeOfCodeGeneratedSince(&start));
}
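

// The t9 requirement comes from the MIPS o32 PIC convention: callees compute
// their GOT pointer from t9, so a C call is typically set up as (sketch only,
// with a hypothetical target):
//   li(t9, Operand(target_address, rmode));
//   Call(t9);
// which is exactly what the Address and Handle<Code> overloads below do.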


int MacroAssembler::CallSize(Address target,
                             RelocInfo::Mode rmode,
                             Condition cond,
                             Register rs,
                             const Operand& rt,
                             BranchDelaySlot bd) {
  int size = CallSize(t9, cond, rs, rt, bd);
  return size + 2 * kInstrSize;
}


void MacroAssembler::Call(Address target,
                          RelocInfo::Mode rmode,
                          Condition cond,
                          Register rs,
                          const Operand& rt,
                          BranchDelaySlot bd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Label start;
  bind(&start);
  int32_t target_int = reinterpret_cast<int32_t>(target);
  // Must record previous source positions before the
  // li() generates a new code target.
  positions_recorder()->WriteRecordedPositions();
  li(t9, Operand(target_int, rmode), true);
  Call(t9, cond, rs, rt, bd);
  ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
            SizeOfCodeGeneratedSince(&start));
}


int MacroAssembler::CallSize(Handle<Code> code,
                             RelocInfo::Mode rmode,
                             unsigned ast_id,
                             Condition cond,
                             Register rs,
                             const Operand& rt,
                             BranchDelaySlot bd) {
  return CallSize(reinterpret_cast<Address>(code.location()),
                  rmode, cond, rs, rt, bd);
}


void MacroAssembler::Call(Handle<Code> code,
                          RelocInfo::Mode rmode,
                          unsigned ast_id,
                          Condition cond,
                          Register rs,
                          const Operand& rt,
                          BranchDelaySlot bd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Label start;
  bind(&start);
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
    SetRecordedAstId(ast_id);
    rmode = RelocInfo::CODE_TARGET_WITH_ID;
  }
  Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
  ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt),
            SizeOfCodeGeneratedSince(&start));
}


void MacroAssembler::Ret(Condition cond,
                         Register rs,
                         const Operand& rt,
                         BranchDelaySlot bd) {
  Jump(ra, cond, rs, rt, bd);
}


void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
  BlockTrampolinePoolScope block_trampoline_pool(this);

  uint32_t imm28;
  imm28 = jump_address(L);
  imm28 &= kImm28Mask;
  { BlockGrowBufferScope block_buf_growth(this);
    // Buffer growth (and relocation) must be blocked for internal references
    // until associated instructions are emitted and available to be patched.
    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
    j(imm28);
  }
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}


void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
  BlockTrampolinePoolScope block_trampoline_pool(this);

  uint32_t imm32;
  imm32 = jump_address(L);
  { BlockGrowBufferScope block_buf_growth(this);
    // Buffer growth (and relocation) must be blocked for internal references
    // until associated instructions are emitted and available to be patched.
    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
    lui(at, (imm32 & kHiMask) >> kLuiShift);
    ori(at, at, (imm32 & kImm16Mask));
  }
  jr(at);

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}


void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
  BlockTrampolinePoolScope block_trampoline_pool(this);

  uint32_t imm32;
  imm32 = jump_address(L);
  { BlockGrowBufferScope block_buf_growth(this);
    // Buffer growth (and relocation) must be blocked for internal references
    // until associated instructions are emitted and available to be patched.
    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
    lui(at, (imm32 & kHiMask) >> kLuiShift);
    ori(at, at, (imm32 & kImm16Mask));
  }
  jalr(at);

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}
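

// The lui/ori pair above materializes the full 32-bit target in at; for
// example (illustration only), with imm32 == 0x12345678:
//   lui at, 0x1234       (at = 0x12340000)
//   ori at, at, 0x5678   (at = 0x12345678)
// so the destination of the register jump is patchable as a single unit.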


void MacroAssembler::DropAndRet(int drop,
                                Condition cond,
                                Register r1,
                                const Operand& r2) {
  // This is a workaround to make sure only one branch instruction is
  // generated. It relies on Drop and Ret not creating branches if
  // cond == cc_always.
  Label skip;
  if (cond != cc_always) {
    Branch(&skip, NegateCondition(cond), r1, r2);
  }

  Drop(drop);
  Ret();

  if (cond != cc_always) {
    bind(&skip);
  }
}


void MacroAssembler::Drop(int count,
                          Condition cond,
                          Register reg,
                          const Operand& op) {
  if (count <= 0) {
    return;
  }

  Label skip;

  if (cond != al) {
    Branch(&skip, NegateCondition(cond), reg, op);
  }

  addiu(sp, sp, count * kPointerSize);

  if (cond != al) {
    bind(&skip);
  }
}
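

// Usage sketch (illustration only): on this 32-bit target, Drop(2) simply
// emits
//   addiu sp, sp, 8
// (2 * kPointerSize), discarding two stack slots with no branch.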


void MacroAssembler::Swap(Register reg1,
                          Register reg2,
                          Register scratch) {
  if (scratch.is(no_reg)) {
    Xor(reg1, reg1, Operand(reg2));
    Xor(reg2, reg2, Operand(reg1));
    Xor(reg1, reg1, Operand(reg2));
  } else {
    mov(scratch, reg1);
    mov(reg1, reg2);
    mov(reg2, scratch);
  }
}
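

// The no-scratch path is the classic XOR swap; for example (illustration
// only) with reg1 == 5 and reg2 == 9:
//   reg1 ^= reg2  ->  reg1 == 12
//   reg2 ^= reg1  ->  reg2 == 5
//   reg1 ^= reg2  ->  reg1 == 9
// so the values are exchanged without a temporary register.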


void MacroAssembler::Call(Label* target) {
  BranchAndLink(target);
}


void MacroAssembler::Push(Handle<Object> handle) {
  li(at, Operand(handle));
  push(at);
}


#ifdef ENABLE_DEBUGGER_SUPPORT

void MacroAssembler::DebugBreak() {
  ASSERT(allow_stub_calls());
  mov(a0, zero_reg);
  li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
  CEntryStub ces(1);
  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}

#endif  // ENABLE_DEBUGGER_SUPPORT


// ---------------------------------------------------------------------------
// Exception handling.

void MacroAssembler::PushTryHandler(CodeLocation try_location,
                                    HandlerType type) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);

  // The return address is passed in register ra.
  if (try_location == IN_JAVASCRIPT) {
    if (type == TRY_CATCH_HANDLER) {
      li(t0, Operand(StackHandler::TRY_CATCH));
    } else {
      li(t0, Operand(StackHandler::TRY_FINALLY));
    }
    // Save the current handler as the next handler.
    li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
    lw(t1, MemOperand(t2));

    addiu(sp, sp, -StackHandlerConstants::kSize);
    sw(ra, MemOperand(sp, StackHandlerConstants::kPCOffset));
    sw(fp, MemOperand(sp, StackHandlerConstants::kFPOffset));
    sw(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
    sw(t0, MemOperand(sp, StackHandlerConstants::kStateOffset));
    sw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset));

    // Link this handler as the new current one.
    sw(sp, MemOperand(t2));

  } else {
    // Must preserve a0-a3, and s0 (argv).
    ASSERT(try_location == IN_JS_ENTRY);
    // The frame pointer does not point to a JS frame so we save NULL
    // for fp. We expect the code throwing an exception to check fp
    // before dereferencing it to restore the context.
    li(t0, Operand(StackHandler::ENTRY));

    // Save the current handler as the next handler.
    li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
    lw(t1, MemOperand(t2));

    ASSERT(Smi::FromInt(0) == 0);  // Used for no context.

    addiu(sp, sp, -StackHandlerConstants::kSize);
    sw(ra, MemOperand(sp, StackHandlerConstants::kPCOffset));
    sw(zero_reg, MemOperand(sp, StackHandlerConstants::kFPOffset));
    sw(zero_reg, MemOperand(sp, StackHandlerConstants::kContextOffset));
    sw(t0, MemOperand(sp, StackHandlerConstants::kStateOffset));
    sw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset));

    // Link this handler as the new current one.
    sw(sp, MemOperand(t2));
  }
}
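

// After the push above, the handler frame sits on the stack as (sketch,
// with kPointerSize == 4):
//   sp + 16 : ra      (kPCOffset)
//   sp + 12 : fp      (kFPOffset, NULL for JS-entry handlers)
//   sp +  8 : cp      (kContextOffset)
//   sp +  4 : state   (kStateOffset)
//   sp +  0 : next    (kNextOffset, the previous handler's sp)
// and the Isolate's handler address now points at sp.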


void MacroAssembler::PopTryHandler() {
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  pop(a1);
  Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
  li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  sw(a1, MemOperand(at));
}


void MacroAssembler::Throw(Register value) {
  // v0 is expected to hold the exception.
  Move(v0, value);

  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);

  // Drop the sp to the top of the handler.
  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  lw(sp, MemOperand(a3));

  // Restore the next handler.
  pop(a2);
  sw(a2, MemOperand(a3));

  // Restore context and frame pointer, discard state (a3).
  MultiPop(a3.bit() | cp.bit() | fp.bit());

  // If the handler is a JS frame, restore the context to the frame.
  // (a3 == ENTRY) == (fp == 0) == (cp == 0), so we could test any
  // of them.
  Label done;
  Branch(&done, eq, fp, Operand(zero_reg));
  sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  bind(&done);

#ifdef DEBUG
  // When emitting debug_code, set ra as return address for the jump.
  // 5 instructions: add: 1, pop: 2, jump: 2.
  const int kOffsetRaInstructions = 5;
  Label find_ra;

  if (emit_debug_code()) {
    // Compute ra for the Jump(t9).
    const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;

    // This branch-and-link sequence is needed to get the current PC on mips,
    // saved to the ra register. Then adjusted for instruction count.
    bal(&find_ra);  // bal exposes branch-delay slot.
    nop();  // Branch delay slot nop.
    bind(&find_ra);
    addiu(ra, ra, kOffsetRaBytes);
  }
#endif

  pop(t9);  // 2 instructions: lw, add sp.
  Jump(t9);  // 2 instructions: jr, nop (in delay slot).

  if (emit_debug_code()) {
    // Make sure that the expected number of instructions were generated.
    ASSERT_EQ(kOffsetRaInstructions,
              InstructionsGeneratedSince(&find_ra));
  }
}


void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
                                      Register value) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);

  // v0 is expected to hold the exception.
  Move(v0, value);

  // Drop sp to the top stack handler.
  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  lw(sp, MemOperand(a3));

  // Unwind the handlers until the ENTRY handler is found.
  Label loop, done;
  bind(&loop);
  // Load the type of the current stack handler.
  const int kStateOffset = StackHandlerConstants::kStateOffset;
  lw(a2, MemOperand(sp, kStateOffset));
  Branch(&done, eq, a2, Operand(StackHandler::ENTRY));
  // Fetch the next handler in the list.
  const int kNextOffset = StackHandlerConstants::kNextOffset;
  lw(sp, MemOperand(sp, kNextOffset));
  jmp(&loop);
  bind(&done);

  // Set the top handler address to the next handler past the current ENTRY
  // handler.
  pop(a2);
  sw(a2, MemOperand(a3));

  if (type == OUT_OF_MEMORY) {
    // Set external caught exception to false.
    ExternalReference external_caught(
        Isolate::kExternalCaughtExceptionAddress, isolate());
    li(a0, Operand(false, RelocInfo::NONE));
    li(a2, Operand(external_caught));
    sw(a0, MemOperand(a2));

    // Set pending exception and v0 to out of memory exception.
    Failure* out_of_memory = Failure::OutOfMemoryException();
    li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
    li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                     isolate())));
    sw(v0, MemOperand(a2));
  }

  // Stack layout at this point. See also StackHandlerConstants.
  // sp ->  state (ENTRY)
  //        cp
  //        fp
  //        ra

  // Restore context and frame pointer, discard state (a2).
  MultiPop(a2.bit() | cp.bit() | fp.bit());

#ifdef DEBUG
  // When emitting debug_code, set ra as return address for the jump.
  // 5 instructions: add: 1, pop: 2, jump: 2.
  const int kOffsetRaInstructions = 5;
  Label find_ra;

  if (emit_debug_code()) {
    // Compute ra for the Jump(t9).
    const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;

    // This branch-and-link sequence is needed to get the current PC on mips,
    // saved to the ra register. Then adjusted for instruction count.
    bal(&find_ra);  // bal exposes branch-delay slot.
    nop();  // Branch delay slot nop.
    bind(&find_ra);
    addiu(ra, ra, kOffsetRaBytes);
  }
#endif
  pop(t9);  // 2 instructions: lw, add sp.
  Jump(t9);  // 2 instructions: jr, nop (in delay slot).

  if (emit_debug_code()) {
    // Make sure that the expected number of instructions were generated.
    ASSERT_EQ(kOffsetRaInstructions,
              InstructionsGeneratedSince(&find_ra));
  }
}


void MacroAssembler::AllocateInNewSpace(int object_size,
                                        Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Label* gc_required,
                                        AllocationFlags flags) {
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      li(result, 0x7091);
      li(scratch1, 0x7191);
      li(scratch2, 0x7291);
    }
    jmp(gc_required);
    return;
  }

  ASSERT(!result.is(scratch1));
  ASSERT(!result.is(scratch2));
  ASSERT(!scratch1.is(scratch2));
  ASSERT(!scratch1.is(t9));
  ASSERT(!scratch2.is(t9));
  ASSERT(!result.is(t9));

  // Make object size into bytes.
  if ((flags & SIZE_IN_WORDS) != 0) {
    object_size *= kPointerSize;
  }
  ASSERT_EQ(0, object_size & kObjectAlignmentMask);

  // Check relative positions of allocation top and limit addresses.
  // ARM adds additional checks to make sure the ldm instruction can be
  // used. On MIPS we don't have ldm so we don't need additional checks either.
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address(isolate());
  intptr_t top =
      reinterpret_cast<intptr_t>(new_space_allocation_top.address());
  intptr_t limit =
      reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
  ASSERT((limit - top) == kPointerSize);

  // Set up allocation top address and object size registers.
  Register topaddr = scratch1;
  Register obj_size_reg = scratch2;
  li(topaddr, Operand(new_space_allocation_top));
  li(obj_size_reg, Operand(object_size));

  // This code stores a temporary value in t9.
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    // Load allocation top into result and allocation limit into t9.
    lw(result, MemOperand(topaddr));
    lw(t9, MemOperand(topaddr, kPointerSize));
  } else {
    if (emit_debug_code()) {
      // Assert that result actually contains top on entry. t9 is used
      // immediately below so this use of t9 does not cause difference with
      // respect to register content between debug and release mode.
      lw(t9, MemOperand(topaddr));
      Check(eq, "Unexpected allocation top", result, Operand(t9));
    }
    // Load allocation limit into t9. Result already contains allocation top.
    lw(t9, MemOperand(topaddr, limit - top));
  }

  // Calculate new top and bail out if new space is exhausted. Use result
  // to calculate the new top.
  Addu(scratch2, result, Operand(obj_size_reg));
  Branch(gc_required, Ugreater, scratch2, Operand(t9));
  sw(scratch2, MemOperand(topaddr));

  // Tag object if requested.
  if ((flags & TAG_OBJECT) != 0) {
    Addu(result, result, Operand(kHeapObjectTag));
  }
}
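

// Minimal usage sketch (illustration; register choices are arbitrary):
// allocate a HeapNumber-sized object, tagged, branching to gc_needed when
// new space is exhausted:
//   AllocateInNewSpace(HeapNumber::kSize, v0, t3, t4, &gc_needed, TAG_OBJECT);
// On success v0 holds the tagged pointer and the allocation top has been
// bumped past the new object.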


void MacroAssembler::AllocateInNewSpace(Register object_size,
                                        Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Label* gc_required,
                                        AllocationFlags flags) {
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      li(result, 0x7091);
      li(scratch1, 0x7191);
      li(scratch2, 0x7291);
    }
    jmp(gc_required);
    return;
  }

  ASSERT(!result.is(scratch1));
  ASSERT(!result.is(scratch2));
  ASSERT(!scratch1.is(scratch2));
  ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));

  // Check relative positions of allocation top and limit addresses.
  // ARM adds additional checks to make sure the ldm instruction can be
  // used. On MIPS we don't have ldm so we don't need additional checks either.
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address(isolate());
  intptr_t top =
      reinterpret_cast<intptr_t>(new_space_allocation_top.address());
  intptr_t limit =
      reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
  ASSERT((limit - top) == kPointerSize);

  // Set up allocation top address and object size registers.
  Register topaddr = scratch1;
  li(topaddr, Operand(new_space_allocation_top));

  // This code stores a temporary value in t9.
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    // Load allocation top into result and allocation limit into t9.
    lw(result, MemOperand(topaddr));
    lw(t9, MemOperand(topaddr, kPointerSize));
  } else {
    if (emit_debug_code()) {
      // Assert that result actually contains top on entry. t9 is used
      // immediately below so this use of t9 does not cause difference with
      // respect to register content between debug and release mode.
      lw(t9, MemOperand(topaddr));
      Check(eq, "Unexpected allocation top", result, Operand(t9));
    }
    // Load allocation limit into t9. Result already contains allocation top.
    lw(t9, MemOperand(topaddr, limit - top));
  }

  // Calculate new top and bail out if new space is exhausted. Use result
  // to calculate the new top. Object size may be in words so a shift is
  // required to get the number of bytes.
  if ((flags & SIZE_IN_WORDS) != 0) {
    sll(scratch2, object_size, kPointerSizeLog2);
    Addu(scratch2, result, scratch2);
  } else {
    Addu(scratch2, result, Operand(object_size));
  }
  Branch(gc_required, Ugreater, scratch2, Operand(t9));

  // Update allocation top. scratch2 temporarily holds the new top.
  if (emit_debug_code()) {
    And(t9, scratch2, Operand(kObjectAlignmentMask));
    Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
  }
  sw(scratch2, MemOperand(topaddr));

  // Tag object if requested.
  if ((flags & TAG_OBJECT) != 0) {
    Addu(result, result, Operand(kHeapObjectTag));
  }
}


void MacroAssembler::UndoAllocationInNewSpace(Register object,
                                              Register scratch) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  // Make sure the object has no tag before resetting top.
  And(object, object, Operand(~kHeapObjectTagMask));
#ifdef DEBUG
  // Check that the object un-allocated is below the current top.
  li(scratch, Operand(new_space_allocation_top));
  lw(scratch, MemOperand(scratch));
  Check(less, "Undo allocation of non allocated memory",
        object, Operand(scratch));
#endif
  // Write the address of the object to un-allocate as the current top.
  li(scratch, Operand(new_space_allocation_top));
  sw(object, MemOperand(scratch));
}


void MacroAssembler::AllocateTwoByteString(Register result,
                                           Register length,
                                           Register scratch1,
                                           Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  sll(scratch1, length, 1);  // Length in bytes, not chars.
  addiu(scratch1, scratch1,
        kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));

  // Allocate two-byte string in new space.
  AllocateInNewSpace(scratch1,
                     result,
                     scratch2,
                     scratch3,
                     gc_required,
                     TAG_OBJECT);

  // Set the map, length and hash field.
  InitializeNewString(result,
                      length,
                      Heap::kStringMapRootIndex,
                      scratch1,
                      scratch2);
}
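

// The size computation above is the usual align-up idiom:
//   size = (2 * length + SeqTwoByteString::kHeaderSize + kObjectAlignmentMask)
//          & ~kObjectAlignmentMask
// For example (illustration, assuming 8-byte object alignment): a
// 5-character string needs 10 bytes of characters, and the masked add rounds
// the total allocation up to the next multiple of 8.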


void MacroAssembler::AllocateAsciiString(Register result,
                                         Register length,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
                                         Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
  ASSERT(kCharSize == 1);
  addiu(scratch1, length, kObjectAlignmentMask + SeqAsciiString::kHeaderSize);
  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));

  // Allocate ASCII string in new space.
  AllocateInNewSpace(scratch1,
                     result,
                     scratch2,
                     scratch3,
                     gc_required,
                     TAG_OBJECT);

  // Set the map, length and hash field.
  InitializeNewString(result,
                      length,
                      Heap::kAsciiStringMapRootIndex,
                      scratch1,
                      scratch2);
}


void MacroAssembler::AllocateTwoByteConsString(Register result,
                                               Register length,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  AllocateInNewSpace(ConsString::kSize,
                     result,
                     scratch1,
                     scratch2,
                     gc_required,
                     TAG_OBJECT);
  InitializeNewString(result,
                      length,
                      Heap::kConsStringMapRootIndex,
                      scratch1,
                      scratch2);
}


void MacroAssembler::AllocateAsciiConsString(Register result,
                                             Register length,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* gc_required) {
  AllocateInNewSpace(ConsString::kSize,
                     result,
                     scratch1,
                     scratch2,
                     gc_required,
                     TAG_OBJECT);
  InitializeNewString(result,
                      length,
                      Heap::kConsAsciiStringMapRootIndex,
                      scratch1,
                      scratch2);
}


void MacroAssembler::AllocateTwoByteSlicedString(Register result,
                                                 Register length,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  AllocateInNewSpace(SlicedString::kSize,
                     result,
                     scratch1,
                     scratch2,
                     gc_required,
                     TAG_OBJECT);

  InitializeNewString(result,
                      length,
                      Heap::kSlicedStringMapRootIndex,
                      scratch1,
                      scratch2);
}


void MacroAssembler::AllocateAsciiSlicedString(Register result,
                                               Register length,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  AllocateInNewSpace(SlicedString::kSize,
                     result,
                     scratch1,
                     scratch2,
                     gc_required,
                     TAG_OBJECT);

  InitializeNewString(result,
                      length,
                      Heap::kSlicedAsciiStringMapRootIndex,
                      scratch1,
                      scratch2);
}


// Allocates a heap number or jumps to the label if the young space is full
// and a scavenge is needed.
void MacroAssembler::AllocateHeapNumber(Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Register heap_number_map,
                                        Label* need_gc) {
  // Allocate an object in the heap for the heap number and tag it as a heap
  // object.
  AllocateInNewSpace(HeapNumber::kSize,
                     result,
                     scratch1,
                     scratch2,
                     need_gc,
                     TAG_OBJECT);

  // Store heap number map in the allocated object.
  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
}


void MacroAssembler::AllocateHeapNumberWithValue(Register result,
                                                 FPURegister value,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
  AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
  sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
}


// Copies a fixed number of fields of heap objects from src to dst.
void MacroAssembler::CopyFields(Register dst,
                                Register src,
                                RegList temps,
                                int field_count) {
  ASSERT((temps & dst.bit()) == 0);
  ASSERT((temps & src.bit()) == 0);
  // Primitive implementation using only one temporary register.

  Register tmp = no_reg;
  // Find a temp register in temps list.
  for (int i = 0; i < kNumRegisters; i++) {
    if ((temps & (1 << i)) != 0) {
      tmp.code_ = i;
      break;
    }
  }
  ASSERT(!tmp.is(no_reg));

  for (int i = 0; i < field_count; i++) {
    lw(tmp, FieldMemOperand(src, i * kPointerSize));
    sw(tmp, FieldMemOperand(dst, i * kPointerSize));
  }
}
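

// Usage sketch (illustration; the temp register is arbitrary): copy the
// first three fields of one object to another via t5:
//   CopyFields(a1, a0, t5.bit(), 3);
// which emits three lw/sw pairs through the first register set in temps.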


void MacroAssembler::CopyBytes(Register src,
                               Register dst,
                               Register length,
                               Register scratch) {
  Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;

  // Align src before copying in word size chunks.
  bind(&align_loop);
  Branch(&done, eq, length, Operand(zero_reg));
  bind(&align_loop_1);
  And(scratch, src, kPointerSize - 1);
  Branch(&word_loop, eq, scratch, Operand(zero_reg));
  lbu(scratch, MemOperand(src));
  Addu(src, src, 1);
  sb(scratch, MemOperand(dst));
  Addu(dst, dst, 1);
  Subu(length, length, Operand(1));
  Branch(&byte_loop_1, ne, length, Operand(zero_reg));

  // Copy bytes in word size chunks.
  bind(&word_loop);
  if (emit_debug_code()) {
    And(scratch, src, kPointerSize - 1);
    Assert(eq, "Expecting alignment for CopyBytes",
           scratch, Operand(zero_reg));
  }
  Branch(&byte_loop, lt, length, Operand(kPointerSize));
  lw(scratch, MemOperand(src));
  Addu(src, src, kPointerSize);

  // TODO(kalmard) check if this can be optimized to use sw in most cases.
  // Can't use unaligned access - copy byte by byte.
  sb(scratch, MemOperand(dst, 0));
  srl(scratch, scratch, 8);
  sb(scratch, MemOperand(dst, 1));
  srl(scratch, scratch, 8);
  sb(scratch, MemOperand(dst, 2));
  srl(scratch, scratch, 8);
  sb(scratch, MemOperand(dst, 3));
  Addu(dst, dst, 4);

  Subu(length, length, Operand(kPointerSize));
  Branch(&word_loop);

  // Copy the last bytes if any left.
  bind(&byte_loop);
  Branch(&done, eq, length, Operand(zero_reg));
  bind(&byte_loop_1);
  lbu(scratch, MemOperand(src));
  Addu(src, src, 1);
  sb(scratch, MemOperand(dst));
  Addu(dst, dst, 1);
  Subu(length, length, Operand(1));
  Branch(&byte_loop_1, ne, length, Operand(zero_reg));
  bind(&done);
}
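

// The word loop stores byte-by-byte because only src is known to be word
// aligned. For example (illustration only, assuming a little-endian build,
// the usual configuration for this port): a loaded word 0xDDCCBBAA is written
// as AA, BB, CC, DD at dst + 0..3, reassembling to the same word even when
// dst itself is unaligned.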


void MacroAssembler::CheckFastElements(Register map,
                                       Register scratch,
                                       Label* fail) {
  STATIC_ASSERT(FAST_ELEMENTS == 0);
  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue));
}


void MacroAssembler::CheckMap(Register obj,
                              Register scratch,
                              Handle<Map> map,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }
  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  li(at, Operand(map));
  Branch(fail, ne, scratch, Operand(at));
}


void MacroAssembler::DispatchMap(Register obj,
                                 Register scratch,
                                 Handle<Map> map,
                                 Handle<Code> success,
                                 SmiCheckType smi_check_type) {
  Label fail;
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, &fail);
  }
  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
  bind(&fail);
}


void MacroAssembler::CheckMap(Register obj,
                              Register scratch,
                              Heap::RootListIndex index,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }
  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  LoadRoot(at, index);
  Branch(fail, ne, scratch, Operand(at));
}


void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
  CpuFeatures::Scope scope(FPU);
  if (IsMipsSoftFloatABI) {
    Move(dst, v0, v1);
  } else {
    Move(dst, f0);  // Reg f0 is o32 ABI FP return value.
  }
}


void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
  CpuFeatures::Scope scope(FPU);
  if (!IsMipsSoftFloatABI) {
    Move(f12, dreg);
  } else {
    Move(a0, a1, dreg);
  }
}


void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
                                             DoubleRegister dreg2) {
  CpuFeatures::Scope scope(FPU);
  if (!IsMipsSoftFloatABI) {
    if (dreg2.is(f12)) {
      ASSERT(!dreg1.is(f14));
      Move(f14, dreg2);
      Move(f12, dreg1);
    } else {
      Move(f12, dreg1);
      Move(f14, dreg2);
    }
  } else {
    Move(a0, a1, dreg1);
    Move(a2, a3, dreg2);
  }
}


void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
                                             Register reg) {
  CpuFeatures::Scope scope(FPU);
  if (!IsMipsSoftFloatABI) {
    Move(f12, dreg);
    Move(a2, reg);
  } else {
    Move(a2, reg);
    Move(a0, a1, dreg);
  }
}


void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
  // This macro takes the dst register to make the code more readable
  // at the call sites. However, the dst register has to be t1 to
  // follow the calling convention which requires the call type to be
  // in t1.
  ASSERT(dst.is(t1));
  if (call_kind == CALL_AS_FUNCTION) {
    li(dst, Operand(Smi::FromInt(1)));
  } else {
    li(dst, Operand(Smi::FromInt(0)));
  }
}


// -----------------------------------------------------------------------------
// JavaScript invokes.

void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    Register code_reg,
                                    Label* done,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper,
                                    CallKind call_kind) {
  bool definitely_matches = false;
  Label regular_invoke;

  // Check whether the expected and actual arguments count match. If not,
  // set up registers according to contract with ArgumentsAdaptorTrampoline:
  // a0: actual arguments count
  // a1: function (passed through to callee)
  // a2: expected arguments count
  // a3: callee code entry

  // The code below is made a lot easier because the calling code already sets
  // up actual and expected registers according to the contract if values are
  // passed in registers.
  ASSERT(actual.is_immediate() || actual.reg().is(a0));
  ASSERT(expected.is_immediate() || expected.reg().is(a2));
  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));

  if (expected.is_immediate()) {
    ASSERT(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      li(a0, Operand(actual.immediate()));
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaptation code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        li(a2, Operand(expected.immediate()));
      }
    }
  } else if (actual.is_immediate()) {
    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
    li(a0, Operand(actual.immediate()));
  } else {
    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
  }

  if (!definitely_matches) {
    if (!code_constant.is_null()) {
      li(a3, Operand(code_constant));
      addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
    }

    Handle<Code> adaptor =
        isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor));
      SetCallKind(t1, call_kind);
      Call(adaptor);
      call_wrapper.AfterCall();
      jmp(done);
    } else {
      SetCallKind(t1, call_kind);
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&regular_invoke);
  }
}
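

// Example of the adaptation decision (illustration only): invoking a
// function declared with 2 parameters using 3 actual arguments leaves
// a0 == 3 and a2 == 2; the counts differ, so control reaches the
// ArgumentsAdaptorTrampoline, which builds an adaptor frame before entering
// the callee. Only when expected == actual (or the callee opts out via
// kDontAdaptArgumentsSentinel) is the trampoline skipped.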


void MacroAssembler::InvokeCode(Register code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                InvokeFlag flag,
                                const CallWrapper& call_wrapper,
                                CallKind call_kind) {
  Label done;

  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
                 call_wrapper, call_kind);
  if (flag == CALL_FUNCTION) {
    SetCallKind(t1, call_kind);
    Call(code);
  } else {
    ASSERT(flag == JUMP_FUNCTION);
    SetCallKind(t1, call_kind);
    Jump(code);
  }
  // Continue here if InvokePrologue does handle the invocation due to
  // mismatched parameter counts.
  bind(&done);
}


void MacroAssembler::InvokeCode(Handle<Code> code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                RelocInfo::Mode rmode,
                                InvokeFlag flag,
                                CallKind call_kind) {
  Label done;

  InvokePrologue(expected, actual, code, no_reg, &done, flag,
                 NullCallWrapper(), call_kind);
  if (flag == CALL_FUNCTION) {
    SetCallKind(t1, call_kind);
    Call(code, rmode);
  } else {
    SetCallKind(t1, call_kind);
    Jump(code, rmode);
  }
  // Continue here if InvokePrologue does handle the invocation due to
  // mismatched parameter counts.
  bind(&done);
}


void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper,
                                    CallKind call_kind) {
  // Contract with called JS functions requires that function is passed in a1.
  ASSERT(function.is(a1));
  Register expected_reg = a2;
  Register code_reg = a3;

  lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
  lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
  lw(expected_reg,
     FieldMemOperand(code_reg,
                     SharedFunctionInfo::kFormalParameterCountOffset));
  sra(expected_reg, expected_reg, kSmiTagSize);
  lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));

  ParameterCount expected(expected_reg);
  InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
}


void MacroAssembler::InvokeFunction(JSFunction* function,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    CallKind call_kind) {
  ASSERT(function->is_compiled());

  // Get the function and set up the context.
  li(a1, Operand(Handle<JSFunction>(function)));
  lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));

  // Invoke the cached code.
  Handle<Code> code(function->code());
  ParameterCount expected(function->shared()->formal_parameter_count());
  if (V8::UseCrankshaft()) {
    UNIMPLEMENTED_MIPS();
  } else {
    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind);
  }
}
3257
3258
3259void MacroAssembler::IsObjectJSObjectType(Register heap_object,
3260 Register map,
3261 Register scratch,
3262 Label* fail) {
3263 lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
3264 IsInstanceJSObjectType(map, scratch, fail);
3265}
3266
3267
3268void MacroAssembler::IsInstanceJSObjectType(Register map,
3269 Register scratch,
3270 Label* fail) {
3271 lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003272 Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
3273 Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
Steve Block44f0eee2011-05-26 01:26:41 +01003274}
3275
3276
3277void MacroAssembler::IsObjectJSStringType(Register object,
3278 Register scratch,
3279 Label* fail) {
3280 ASSERT(kNotStringTag != 0);
3281
3282 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3283 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3284 And(scratch, scratch, Operand(kIsNotStringMask));
3285 Branch(fail, ne, scratch, Operand(zero_reg));
Steve Block6ded16b2010-05-10 14:33:55 +01003286}
3287
3288
3289// ---------------------------------------------------------------------------
3290// Support functions.
3291
Steve Block44f0eee2011-05-26 01:26:41 +01003292
3293void MacroAssembler::TryGetFunctionPrototype(Register function,
3294 Register result,
3295 Register scratch,
3296 Label* miss) {
3297 // Check that the receiver isn't a smi.
3298 JumpIfSmi(function, miss);
3299
3300 // Check that the function really is a function. Load map into result reg.
3301 GetObjectType(function, result, scratch);
3302 Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
3303
3304 // Make sure that the function has an instance prototype.
3305 Label non_instance;
3306 lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
3307 And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
3308 Branch(&non_instance, ne, scratch, Operand(zero_reg));
3309
3310 // Get the prototype or initial map from the function.
3311 lw(result,
3312 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3313
3314 // If the prototype or initial map is the hole, don't return it and
3315 // simply miss the cache instead. This will allow us to allocate a
3316 // prototype object on-demand in the runtime system.
3317 LoadRoot(t8, Heap::kTheHoleValueRootIndex);
3318 Branch(miss, eq, result, Operand(t8));
3319
3320 // If the function does not have an initial map, we're done.
3321 Label done;
3322 GetObjectType(result, scratch, scratch);
3323 Branch(&done, ne, scratch, Operand(MAP_TYPE));
3324
3325 // Get the prototype from the initial map.
3326 lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
3327 jmp(&done);
3328
3329 // Non-instance prototype: Fetch prototype from constructor field
3330 // in initial map.
3331 bind(&non_instance);
3332 lw(result, FieldMemOperand(result, Map::kConstructorOffset));
3333
3334 // All done.
3335 bind(&done);
3336}
Steve Block6ded16b2010-05-10 14:33:55 +01003337
3338
Steve Block44f0eee2011-05-26 01:26:41 +01003339void MacroAssembler::GetObjectType(Register object,
3340 Register map,
3341 Register type_reg) {
3342 lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
3343 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3344}
Steve Block6ded16b2010-05-10 14:33:55 +01003345
3346
3347// -----------------------------------------------------------------------------
Ben Murdoch257744e2011-11-30 15:57:28 +00003348// Runtime calls.
Steve Block6ded16b2010-05-10 14:33:55 +01003349
Andrei Popescu31002712010-02-23 13:46:05 +00003350void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
3351 Register r1, const Operand& r2) {
Steve Block6ded16b2010-05-10 14:33:55 +01003352 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003353 Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2);
Andrei Popescu31002712010-02-23 13:46:05 +00003354}
3355
3356
Ben Murdoch257744e2011-11-30 15:57:28 +00003357MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond,
3358 Register r1, const Operand& r2) {
3359 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
3360 Object* result;
3361 { MaybeObject* maybe_result = stub->TryGetCode();
3362 if (!maybe_result->ToObject(&result)) return maybe_result;
3363 }
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003364 Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET,
3365 kNoASTId, cond, r1, r2);
Ben Murdoch257744e2011-11-30 15:57:28 +00003366 return result;
3367}
3368
3369
Steve Block44f0eee2011-05-26 01:26:41 +01003370void MacroAssembler::TailCallStub(CodeStub* stub) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003371 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Steve Block44f0eee2011-05-26 01:26:41 +01003372 Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
Andrei Popescu31002712010-02-23 13:46:05 +00003373}
3374
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003375
Ben Murdoch257744e2011-11-30 15:57:28 +00003376MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub,
3377 Condition cond,
3378 Register r1,
3379 const Operand& r2) {
3380 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
3381 Object* result;
3382 { MaybeObject* maybe_result = stub->TryGetCode();
3383 if (!maybe_result->ToObject(&result)) return maybe_result;
3384 }
3385 Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2);
3386 return result;
3387}
3388
3389
3390static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
3391 return ref0.address() - ref1.address();
3392}
3393
3394
3395MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
3396 ExternalReference function, int stack_space) {
3397 ExternalReference next_address =
3398 ExternalReference::handle_scope_next_address();
3399 const int kNextOffset = 0;
3400 const int kLimitOffset = AddressOffset(
3401 ExternalReference::handle_scope_limit_address(),
3402 next_address);
3403 const int kLevelOffset = AddressOffset(
3404 ExternalReference::handle_scope_level_address(),
3405 next_address);
3406
3407 // Allocate HandleScope in callee-save registers.
3408 li(s3, Operand(next_address));
3409 lw(s0, MemOperand(s3, kNextOffset));
3410 lw(s1, MemOperand(s3, kLimitOffset));
3411 lw(s2, MemOperand(s3, kLevelOffset));
3412 Addu(s2, s2, Operand(1));
3413 sw(s2, MemOperand(s3, kLevelOffset));
3414
3415 // The O32 ABI requires us to pass a pointer in a0 where the returned struct
3416 // (4 bytes) will be placed. This is also built into the Simulator.
3417 // Set up the pointer to the returned value (a0). It was allocated in
3418 // EnterExitFrame.
3419 addiu(a0, fp, ExitFrameConstants::kStackSpaceOffset);
3420
3421 // Native call returns to the DirectCEntry stub which redirects to the
3422 // return address pushed on stack (could have moved after GC).
3423 // DirectCEntry stub itself is generated early and never moves.
3424 DirectCEntryStub stub;
3425 stub.GenerateCall(this, function);
3426
3427 // As mentioned above, on MIPS a pointer is returned - we need to dereference
3428 // it to get the actual return value (which is also a pointer).
3429 lw(v0, MemOperand(v0));
3430
3431 Label promote_scheduled_exception;
3432 Label delete_allocated_handles;
3433 Label leave_exit_frame;
3434
3435 // If result is non-zero, dereference to get the result value
3436 // otherwise set it to undefined.
3437 Label skip;
3438 LoadRoot(a0, Heap::kUndefinedValueRootIndex);
3439 Branch(&skip, eq, v0, Operand(zero_reg));
3440 lw(a0, MemOperand(v0));
3441 bind(&skip);
3442 mov(v0, a0);
3443
3444 // No more valid handles (the result handle was the last one). Restore
3445 // previous handle scope.
3446 sw(s0, MemOperand(s3, kNextOffset));
3447 if (emit_debug_code()) {
3448 lw(a1, MemOperand(s3, kLevelOffset));
3449 Check(eq, "Unexpected level after return from api call", a1, Operand(s2));
3450 }
3451 Subu(s2, s2, Operand(1));
3452 sw(s2, MemOperand(s3, kLevelOffset));
3453 lw(at, MemOperand(s3, kLimitOffset));
3454 Branch(&delete_allocated_handles, ne, s1, Operand(at));
3455
3456 // Check if the function scheduled an exception.
3457 bind(&leave_exit_frame);
3458 LoadRoot(t0, Heap::kTheHoleValueRootIndex);
3459 li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
3460 lw(t1, MemOperand(at));
3461 Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
3462 li(s0, Operand(stack_space));
3463 LeaveExitFrame(false, s0);
3464 Ret();
3465
3466 bind(&promote_scheduled_exception);
3467 MaybeObject* result = TryTailCallExternalReference(
3468 ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0, 1);
3469 if (result->IsFailure()) {
3470 return result;
3471 }
3472
3473 // HandleScope limit has changed. Delete allocated extensions.
3474 bind(&delete_allocated_handles);
3475 sw(s1, MemOperand(s3, kLimitOffset));
3476 mov(s0, v0);
3477 mov(a0, v0);
3478 PrepareCallCFunction(1, s1);
3479 li(a0, Operand(ExternalReference::isolate_address()));
3480 CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
3481 1);
3482 mov(v0, s0);
3483 jmp(&leave_exit_frame);
3484
3485 return result;
3486}
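

// A rough sketch, in plain C++, of the handle-scope bookkeeping mirrored by
// the assembly above. The struct and field names are illustrative only, not
// the real HandleScope layout: entering saves next/limit and bumps level;
// leaving restores them and deletes extensions only if the limit moved.
//
//   struct ScopeData { Object** next; Object** limit; int level; };
//   void Enter(ScopeData* d, ScopeData* saved) { *saved = *d; d->level++; }
//   void Leave(ScopeData* d, const ScopeData& saved) {
//     d->next = saved.next;
//     d->level--;
//     if (d->limit != saved.limit) {
//       d->limit = saved.limit;  // Delete allocated extensions here.
//     }
//   }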


void MacroAssembler::IllegalOperation(int num_arguments) {
  if (num_arguments > 0) {
    addiu(sp, sp, num_arguments * kPointerSize);
  }
  LoadRoot(v0, Heap::kUndefinedValueRootIndex);
}


void MacroAssembler::IndexFromHash(Register hash,
                                   Register index) {
  // If the hash field contains an array index pick it out. The assert checks
  // that the constants for the maximum number of digits for an array index
  // cached in the hash field and the number of bits reserved for it do not
  // conflict.
  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
         (1 << String::kArrayIndexValueBits));
  // We want the smi-tagged index in the index register.
  // kArrayIndexValueMask has zeros in the low kHashShift bits.
  STATIC_ASSERT(kSmiTag == 0);
  Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
  sll(index, hash, kSmiTagSize);
}
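

// An illustrative C equivalent of the extract-and-tag sequence above,
// assuming the hash-field layout described by String::kHashShift and
// String::kArrayIndexValueBits (the helper name is made up for this sketch).
static inline uint32_t SmiTaggedArrayIndex(uint32_t hash_field) {
  // Ext() extracts kArrayIndexValueBits bits starting at bit kHashShift.
  uint32_t array_index = (hash_field >> String::kHashShift) &
                         ((1u << String::kArrayIndexValueBits) - 1);
  return array_index << kSmiTagSize;  // Smi-tag, matching the final sll().
}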


void MacroAssembler::ObjectToDoubleFPURegister(Register object,
                                               FPURegister result,
                                               Register scratch1,
                                               Register scratch2,
                                               Register heap_number_map,
                                               Label* not_number,
                                               ObjectToDoubleFlags flags) {
  Label done;
  if ((flags & OBJECT_NOT_SMI) == 0) {
    Label not_smi;
    JumpIfNotSmi(object, &not_smi);
    // Remove smi tag and convert to double.
    sra(scratch1, object, kSmiTagSize);
    mtc1(scratch1, result);
    cvt_d_w(result, result);
    Branch(&done);
    bind(&not_smi);
  }
  // Check for heap number and load double value from it.
  lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
  Branch(not_number, ne, scratch1, Operand(heap_number_map));

  if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
    // If exponent is all ones the number is either a NaN or +/-Infinity.
    Register exponent = scratch1;
    Register mask_reg = scratch2;
    lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
    li(mask_reg, HeapNumber::kExponentMask);

    And(exponent, exponent, mask_reg);
    Branch(not_number, eq, exponent, Operand(mask_reg));
  }
  ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
  bind(&done);
}


void MacroAssembler::SmiToDoubleFPURegister(Register smi,
                                            FPURegister value,
                                            Register scratch1) {
  sra(scratch1, smi, kSmiTagSize);
  mtc1(scratch1, value);
  cvt_d_w(value, value);
}
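

// Worked example of the untag-and-convert sequence above: the smi 10 is
// stored as 10 << kSmiTagSize == 20; sra by kSmiTagSize recovers the
// integer 10, mtc1 moves it into the FPU register, and cvt_d_w converts
// the 32-bit word to the double 10.0.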


void MacroAssembler::AdduAndCheckForOverflow(Register dst,
                                             Register left,
                                             Register right,
                                             Register overflow_dst,
                                             Register scratch) {
  ASSERT(!dst.is(overflow_dst));
  ASSERT(!dst.is(scratch));
  ASSERT(!overflow_dst.is(scratch));
  ASSERT(!overflow_dst.is(left));
  ASSERT(!overflow_dst.is(right));
  ASSERT(!left.is(right));

  if (dst.is(left)) {
    mov(scratch, left);  // Preserve left.
    addu(dst, left, right);  // Left is overwritten.
    xor_(scratch, dst, scratch);  // Original left.
    xor_(overflow_dst, dst, right);
    and_(overflow_dst, overflow_dst, scratch);
  } else if (dst.is(right)) {
    mov(scratch, right);  // Preserve right.
    addu(dst, left, right);  // Right is overwritten.
    xor_(scratch, dst, scratch);  // Original right.
    xor_(overflow_dst, dst, left);
    and_(overflow_dst, overflow_dst, scratch);
  } else {
    addu(dst, left, right);
    xor_(overflow_dst, dst, left);
    xor_(scratch, dst, right);
    and_(overflow_dst, scratch, overflow_dst);
  }
}
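

// A minimal sketch of the signed-overflow test computed above, assuming
// 32-bit two's complement arithmetic (the helper is illustrative, not part
// of the assembler): addition overflows exactly when the result's sign
// differs from both operands' signs, i.e. when the sign bit of
// (dst ^ left) & (dst ^ right) is set.
static inline bool AdditionOverflows(int32_t left, int32_t right) {
  // Compute the wrapped sum in unsigned arithmetic, as addu does.
  int32_t dst = static_cast<int32_t>(
      static_cast<uint32_t>(left) + static_cast<uint32_t>(right));
  return ((dst ^ left) & (dst ^ right)) < 0;  // Sign bit of the and-term.
}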


void MacroAssembler::SubuAndCheckForOverflow(Register dst,
                                             Register left,
                                             Register right,
                                             Register overflow_dst,
                                             Register scratch) {
  ASSERT(!dst.is(overflow_dst));
  ASSERT(!dst.is(scratch));
  ASSERT(!overflow_dst.is(scratch));
  ASSERT(!overflow_dst.is(left));
  ASSERT(!overflow_dst.is(right));
  ASSERT(!left.is(right));
  ASSERT(!scratch.is(left));
  ASSERT(!scratch.is(right));

  if (dst.is(left)) {
    mov(scratch, left);  // Preserve left.
    subu(dst, left, right);  // Left is overwritten.
    xor_(overflow_dst, dst, scratch);  // scratch is original left.
    xor_(scratch, scratch, right);  // scratch is now original left ^ right.
    and_(overflow_dst, scratch, overflow_dst);
  } else if (dst.is(right)) {
    mov(scratch, right);  // Preserve right.
    subu(dst, left, right);  // Right is overwritten.
    xor_(overflow_dst, dst, left);
    xor_(scratch, left, scratch);  // scratch is now left ^ original right.
    and_(overflow_dst, scratch, overflow_dst);
  } else {
    subu(dst, left, right);
    xor_(overflow_dst, dst, left);
    xor_(scratch, left, right);
    and_(overflow_dst, scratch, overflow_dst);
  }
}
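

// The matching sketch for subtraction (again illustrative, assuming 32-bit
// two's complement): left - right overflows exactly when the operands'
// signs differ and the result's sign differs from left's, i.e. when the
// sign bit of (dst ^ left) & (left ^ right) is set.
static inline bool SubtractionOverflows(int32_t left, int32_t right) {
  // Compute the wrapped difference in unsigned arithmetic, as subu does.
  int32_t dst = static_cast<int32_t>(
      static_cast<uint32_t>(left) - static_cast<uint32_t>(right));
  return ((dst ^ left) & (left ^ right)) < 0;  // Sign bit of the and-term.
}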


void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 int num_arguments) {
  // All parameters are on the stack. v0 has the return value after call.

  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments matches the
  // expectation.
  if (f->nargs >= 0 && f->nargs != num_arguments) {
    IllegalOperation(num_arguments);
    return;
  }

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  li(a0, num_arguments);
  li(a1, Operand(ExternalReference(f, isolate())));
  CEntryStub stub(1);
  CallStub(&stub);
}


void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
  const Runtime::Function* function = Runtime::FunctionForId(id);
  li(a0, Operand(function->nargs));
  li(a1, Operand(ExternalReference(function, isolate())));
  CEntryStub stub(1);
  stub.SaveDoubles();
  CallStub(&stub);
}


void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
  CallRuntime(Runtime::FunctionForId(fid), num_arguments);
}


void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments) {
  li(a0, Operand(num_arguments));
  li(a1, Operand(ext));

  CEntryStub stub(1);
  CallStub(&stub);
}


void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                               int num_arguments,
                                               int result_size) {
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  li(a0, Operand(num_arguments));
  JumpToExternalReference(ext);
}


MaybeObject* MacroAssembler::TryTailCallExternalReference(
    const ExternalReference& ext, int num_arguments, int result_size) {
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  li(a0, num_arguments);
  return TryJumpToExternalReference(ext);
}


void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                     int num_arguments,
                                     int result_size) {
  TailCallExternalReference(ExternalReference(fid, isolate()),
                            num_arguments,
                            result_size);
}


void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
  li(a1, Operand(builtin));
  CEntryStub stub(1);
  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}


MaybeObject* MacroAssembler::TryJumpToExternalReference(
    const ExternalReference& builtin) {
  li(a1, Operand(builtin));
  CEntryStub stub(1);
  return TryTailCallStub(&stub);
}


void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeFlag flag,
                                   const CallWrapper& call_wrapper) {
  GetBuiltinEntry(t9, id);
  if (flag == CALL_FUNCTION) {
    call_wrapper.BeforeCall(CallSize(t9));
    SetCallKind(t1, CALL_AS_METHOD);
    Call(t9);
    call_wrapper.AfterCall();
  } else {
    ASSERT(flag == JUMP_FUNCTION);
    SetCallKind(t1, CALL_AS_METHOD);
    Jump(t9);
  }
}


void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
  // Load the builtins object into target register.
  lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
  // Load the JavaScript builtin function from the builtins object.
  lw(target, FieldMemOperand(target,
                             JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}


void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  ASSERT(!target.is(a1));
  GetBuiltinFunction(a1, id);
  // Load the code entry point from the builtins object.
  lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
}


void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                Register scratch1, Register scratch2) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    li(scratch1, Operand(value));
    li(scratch2, Operand(ExternalReference(counter)));
    sw(scratch1, MemOperand(scratch2));
  }
}


void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    li(scratch2, Operand(ExternalReference(counter)));
    lw(scratch1, MemOperand(scratch2));
    Addu(scratch1, scratch1, Operand(value));
    sw(scratch1, MemOperand(scratch2));
  }
}


void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    li(scratch2, Operand(ExternalReference(counter)));
    lw(scratch1, MemOperand(scratch2));
    Subu(scratch1, scratch1, Operand(value));
    sw(scratch1, MemOperand(scratch2));
  }
}


// -----------------------------------------------------------------------------
// Debugging.

void MacroAssembler::Assert(Condition cc, const char* msg,
                            Register rs, Operand rt) {
  if (emit_debug_code())
    Check(cc, msg, rs, rt);
}


void MacroAssembler::AssertRegisterIsRoot(Register reg,
                                          Heap::RootListIndex index) {
  if (emit_debug_code()) {
    LoadRoot(at, index);
    Check(eq, "Register did not match expected root", reg, Operand(at));
  }
}


void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    ASSERT(!elements.is(at));
    Label ok;
    push(elements);
    lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
    LoadRoot(at, Heap::kFixedArrayMapRootIndex);
    Branch(&ok, eq, elements, Operand(at));
    LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
    Branch(&ok, eq, elements, Operand(at));
    LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
    Branch(&ok, eq, elements, Operand(at));
    Abort("JSObject with fast elements map has slow elements");
    bind(&ok);
    pop(elements);
  }
}


void MacroAssembler::Check(Condition cc, const char* msg,
                           Register rs, Operand rt) {
  Label L;
  Branch(&L, cc, rs, rt);
  Abort(msg);
  // Will not return here.
  bind(&L);
}


void MacroAssembler::Abort(const char* msg) {
  Label abort_start;
  bind(&abort_start);
  // We want to pass the msg string as a smi to avoid GC
  // problems; however, msg is not guaranteed to be aligned
  // properly. Instead, we pass an aligned pointer that is
  // a proper v8 smi, but also pass the alignment difference
  // from the real pointer as a smi.
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }
#endif
  // Disable stub call restrictions to always allow calls to abort.
  AllowStubCallsScope allow_scope(this, true);

  li(a0, Operand(p0));
  push(a0);
  li(a0, Operand(Smi::FromInt(p1 - p0)));
  push(a0);
  CallRuntime(Runtime::kAbort, 2);
  // Will not return here.
  if (is_trampoline_pool_blocked()) {
    // If the calling code cares about the exact number of
    // instructions generated, we insert padding here to keep the size
    // of the Abort macro constant.
    // Currently in debug mode with debug_code enabled the number of
    // generated instructions is 14, so we use this as a maximum value.
    static const int kExpectedAbortInstructions = 14;
    int abort_instructions = InstructionsGeneratedSince(&abort_start);
    ASSERT(abort_instructions <= kExpectedAbortInstructions);
    while (abort_instructions++ < kExpectedAbortInstructions) {
      nop();
    }
  }
}
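

// Sketch of the encoding used above, assuming kSmiTagMask == 1 and
// kSmiTag == 0: p1 is the raw char* as an integer, p0 is p1 with the low
// bit cleared (a valid smi value), and the difference p1 - p0 (0 or 1) is
// passed as a second smi, so the runtime can reconstruct p1 = p0 + delta
// without the GC ever seeing an unaligned "pointer".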


void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    for (int i = 1; i < context_chain_length; i++) {
      lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    }
  } else {
    // Slot is in the current function context. Move it into the
    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context in cp).
    Move(dst, cp);
  }
}


void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  // Load the global context from the global or builtins object.
  lw(function, FieldMemOperand(function,
                               GlobalObject::kGlobalContextOffset));
  // Load the function from the global context.
  lw(function, MemOperand(function, Context::SlotOffset(index)));
}


void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map,
                                                  Register scratch) {
  // Load the initial map. The global functions all have initial maps.
  lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
    Branch(&ok);
    bind(&fail);
    Abort("Global functions must have initial map");
    bind(&ok);
  }
}


void MacroAssembler::EnterFrame(StackFrame::Type type) {
  addiu(sp, sp, -5 * kPointerSize);
  li(t8, Operand(Smi::FromInt(type)));
  li(t9, Operand(CodeObject()));
  sw(ra, MemOperand(sp, 4 * kPointerSize));
  sw(fp, MemOperand(sp, 3 * kPointerSize));
  sw(cp, MemOperand(sp, 2 * kPointerSize));
  sw(t8, MemOperand(sp, 1 * kPointerSize));
  sw(t9, MemOperand(sp, 0 * kPointerSize));
  addiu(fp, sp, 3 * kPointerSize);
}


void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  mov(sp, fp);
  lw(fp, MemOperand(sp, 0 * kPointerSize));
  lw(ra, MemOperand(sp, 1 * kPointerSize));
  addiu(sp, sp, 2 * kPointerSize);
}


void MacroAssembler::EnterExitFrame(bool save_doubles,
                                    int stack_space) {
  // Set up the frame structure on the stack.
  STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
  STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
  STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);

  // This is how the stack will look:
  // fp + 2 (==kCallerSPDisplacement) - old stack's end
  // [fp + 1 (==kCallerPCOffset)] - saved old ra
  // [fp + 0 (==kCallerFPOffset)] - saved old fp
  // [fp - 1 (==kSPOffset)] - sp of the called function
  // [fp - 2 (==kCodeOffset)] - CodeObject
  // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
  //   new stack (will contain saved ra)

  // Save registers.
  addiu(sp, sp, -4 * kPointerSize);
  sw(ra, MemOperand(sp, 3 * kPointerSize));
  sw(fp, MemOperand(sp, 2 * kPointerSize));
  addiu(fp, sp, 2 * kPointerSize);  // Set up new frame pointer.

  if (emit_debug_code()) {
    sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
  }

  li(t8, Operand(CodeObject()));  // Accessed from ExitFrame::code_slot.
  sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));

  // Save the frame pointer and the context in top.
  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  sw(fp, MemOperand(t8));
  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  sw(cp, MemOperand(t8));

  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  if (save_doubles) {
    // The stack must be aligned to 0 modulo 8 for stores with sdc1.
    ASSERT(kDoubleSize == frame_alignment);
    if (frame_alignment > 0) {
      ASSERT(IsPowerOf2(frame_alignment));
      And(sp, sp, Operand(-frame_alignment));  // Align stack.
    }
    int space = FPURegister::kNumRegisters * kDoubleSize;
    Subu(sp, sp, Operand(space));
    // Remember: we only need to save every 2nd double FPU value.
    for (int i = 0; i < FPURegister::kNumRegisters; i += 2) {
      FPURegister reg = FPURegister::from_code(i);
      sdc1(reg, MemOperand(sp, i * kDoubleSize));
    }
  }

  // Reserve place for the return address, stack space and an optional slot
  // (used by the DirectCEntryStub to hold the return value if a struct is
  // returned) and align the frame preparing for calling the runtime function.
  ASSERT(stack_space >= 0);
  Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
  if (frame_alignment > 0) {
    ASSERT(IsPowerOf2(frame_alignment));
    And(sp, sp, Operand(-frame_alignment));  // Align stack.
  }

  // Set the exit frame sp value to point just before the return address
  // location.
  addiu(at, sp, kPointerSize);
  sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
}


void MacroAssembler::LeaveExitFrame(bool save_doubles,
                                    Register argument_count) {
  // Optionally restore all double registers.
  if (save_doubles) {
    // Remember: we only need to restore every 2nd double FPU value.
    lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
    for (int i = 0; i < FPURegister::kNumRegisters; i += 2) {
      FPURegister reg = FPURegister::from_code(i);
      ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
    }
  }

  // Clear top frame.
  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  sw(zero_reg, MemOperand(t8));

  // Restore current context from top and clear it in debug mode.
  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  lw(cp, MemOperand(t8));
#ifdef DEBUG
  sw(a3, MemOperand(t8));
#endif

  // Pop the arguments, restore registers, and return.
  mov(sp, fp);  // Respect ABI stack constraint.
  lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
  lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
  addiu(sp, sp, 8);
  if (argument_count.is_valid()) {
    sll(t8, argument_count, kPointerSizeLog2);
    addu(sp, sp, t8);
  }
}


void MacroAssembler::InitializeNewString(Register string,
                                         Register length,
                                         Heap::RootListIndex map_index,
                                         Register scratch1,
                                         Register scratch2) {
  sll(scratch1, length, kSmiTagSize);
  LoadRoot(scratch2, map_index);
  sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
  li(scratch1, Operand(String::kEmptyHashField));
  sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
  sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
}


int MacroAssembler::ActivationFrameAlignment() {
#if defined(V8_HOST_ARCH_MIPS)
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one Mips
  // platform for another Mips platform with a different alignment.
  return OS::ActivationFrameAlignment();
#else  // defined(V8_HOST_ARCH_MIPS)
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif  // defined(V8_HOST_ARCH_MIPS)
}


void MacroAssembler::AssertStackIsAligned() {
  if (emit_debug_code()) {
    const int frame_alignment = ActivationFrameAlignment();
    const int frame_alignment_mask = frame_alignment - 1;

    if (frame_alignment > kPointerSize) {
      Label alignment_as_expected;
      ASSERT(IsPowerOf2(frame_alignment));
      andi(at, sp, frame_alignment_mask);
      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
      // Don't use Check here, as it will call Runtime_Abort re-entering here.
      stop("Unexpected stack alignment");
      bind(&alignment_as_expected);
    }
  }
}


void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
    Register reg,
    Register scratch,
    Label* not_power_of_two_or_zero) {
  Subu(scratch, reg, Operand(1));
  Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
         scratch, Operand(zero_reg));
  and_(at, scratch, reg);  // In the delay slot.
  Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
}
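

// Sketch of the classic bit trick used above (illustrative helper): a value
// is a non-zero power of two exactly when clearing its lowest set bit,
// reg & (reg - 1), leaves zero. The branch sequence above covers the zero
// case by checking that reg - 1 did not go negative before the and.
static inline bool IsNonZeroPowerOfTwo(uint32_t reg) {
  return reg != 0 && (reg & (reg - 1)) == 0;  // E.g. 8 & 7 == 0, 6 & 5 != 0.
}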


void MacroAssembler::JumpIfNotBothSmi(Register reg1,
                                      Register reg2,
                                      Label* on_not_both_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  ASSERT_EQ(1, kSmiTagMask);
  or_(at, reg1, reg2);
  andi(at, at, kSmiTagMask);
  Branch(on_not_both_smi, ne, at, Operand(zero_reg));
}


void MacroAssembler::JumpIfEitherSmi(Register reg1,
                                     Register reg2,
                                     Label* on_either_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  ASSERT_EQ(1, kSmiTagMask);
  // The AND of the tag bits is 0 if either operand is a smi; only when both
  // tag bits are 1 (i.e. neither is a smi) does the branch fall through.
  and_(at, reg1, reg2);
  andi(at, at, kSmiTagMask);
  Branch(on_either_smi, eq, at, Operand(zero_reg));
}


void MacroAssembler::AbortIfSmi(Register object) {
  STATIC_ASSERT(kSmiTag == 0);
  andi(at, object, kSmiTagMask);
  Assert(ne, "Operand is a smi", at, Operand(zero_reg));
}


void MacroAssembler::AbortIfNotSmi(Register object) {
  STATIC_ASSERT(kSmiTag == 0);
  andi(at, object, kSmiTagMask);
  Assert(eq, "Operand is not a smi", at, Operand(zero_reg));
}


void MacroAssembler::AbortIfNotString(Register object) {
  STATIC_ASSERT(kSmiTag == 0);
  And(t0, object, Operand(kSmiTagMask));
  Assert(ne, "Operand is not a string", t0, Operand(zero_reg));
  push(object);
  lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
  lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
  Assert(lo, "Operand is not a string", object, Operand(FIRST_NONSTRING_TYPE));
  pop(object);
}


void MacroAssembler::AbortIfNotRootValue(Register src,
                                         Heap::RootListIndex root_value_index,
                                         const char* message) {
  ASSERT(!src.is(at));
  LoadRoot(at, root_value_index);
  Assert(eq, message, src, Operand(at));
}


void MacroAssembler::JumpIfNotHeapNumber(Register object,
                                         Register heap_number_map,
                                         Register scratch,
                                         Label* on_not_heap_number) {
  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
}


void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
    Register first,
    Register second,
    Register scratch1,
    Register scratch2,
    Label* failure) {
  // Test that both first and second are sequential ASCII strings.
  // Assume that they are non-smis.
  lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
  lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
  lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));

  JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
                                               scratch2,
                                               scratch1,
                                               scratch2,
                                               failure);
}


void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
                                                         Register second,
                                                         Register scratch1,
                                                         Register scratch2,
                                                         Label* failure) {
  // Check that neither is a smi.
  STATIC_ASSERT(kSmiTag == 0);
  And(scratch1, first, Operand(second));
  And(scratch1, scratch1, Operand(kSmiTagMask));
  Branch(failure, eq, scratch1, Operand(zero_reg));
  JumpIfNonSmisNotBothSequentialAsciiStrings(first,
                                             second,
                                             scratch1,
                                             scratch2,
                                             failure);
}


void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
    Register first,
    Register second,
    Register scratch1,
    Register scratch2,
    Label* failure) {
  int kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
  ASSERT(kFlatAsciiStringTag <= 0xffff);  // Ensure this fits 16-bit immed.
  andi(scratch1, first, kFlatAsciiStringMask);
  Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
  andi(scratch2, second, kFlatAsciiStringMask);
  Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
}
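

// An illustrative C++ form of the mask/tag test above, using the same
// instance-type constants from the V8 headers (the helper name is made up
// for this sketch).
static inline bool IsSequentialAsciiInstanceType(uint32_t instance_type) {
  const uint32_t kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  // Under the mask, a sequential ASCII string carries the string tag, the
  // ASCII encoding bit and the sequential representation bits, which
  // together equal ASCII_STRING_TYPE.
  return (instance_type & kFlatAsciiStringMask) == ASCII_STRING_TYPE;
}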


void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
                                                            Register scratch,
                                                            Label* failure) {
  int kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
  And(scratch, type, Operand(kFlatAsciiStringMask));
  Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
}


static const int kRegisterPassedArguments = 4;

void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
  int frame_alignment = ActivationFrameAlignment();

  // Up to four simple arguments are passed in registers a0..a3.
  // Those four arguments must have reserved argument slots on the stack for
  // mips, even though those argument slots are not normally used.
  // Remaining arguments are pushed on the stack, above (higher address than)
  // the argument slots.
  int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
                                0 : num_arguments - kRegisterPassedArguments) +
                               kCArgSlotCount;
  if (frame_alignment > kPointerSize) {
    // Make stack end at alignment and make room for num_arguments - 4 words
    // and the original value of sp.
    mov(scratch, sp);
    Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
    ASSERT(IsPowerOf2(frame_alignment));
    And(sp, sp, Operand(-frame_alignment));
    sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}
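

// A sketch of the O32 stack bookkeeping above (the helper is illustrative;
// the constants are the real ones used by the MIPS port): four argument
// slots are always reserved for a0..a3, and only arguments beyond the
// fourth occupy additional words above those slots.
static inline int StackPassedArgumentWords(int num_arguments) {
  int spilled = (num_arguments <= kRegisterPassedArguments)
                    ? 0
                    : num_arguments - kRegisterPassedArguments;
  return spilled + kCArgSlotCount;  // E.g. 6 arguments -> 2 + 4 = 6 words.
}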


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  CallCFunctionHelper(no_reg, function, t8, num_arguments);
}


void MacroAssembler::CallCFunction(Register function,
                                   Register scratch,
                                   int num_arguments) {
  CallCFunctionHelper(function,
                      ExternalReference::the_hole_value_location(isolate()),
                      scratch,
                      num_arguments);
}


void MacroAssembler::CallCFunctionHelper(Register function,
                                         ExternalReference function_reference,
                                         Register scratch,
                                         int num_arguments) {
  // Make sure that the stack is aligned before calling a C function unless
  // running in the simulator. The simulator has its own alignment check which
  // provides more information.
  // The argument slots are presumed to have been set up by
  // PrepareCallCFunction. The C function must be called via t9, per the
  // MIPS ABI.

#if defined(V8_HOST_ARCH_MIPS)
  if (emit_debug_code()) {
    int frame_alignment = OS::ActivationFrameAlignment();
    int frame_alignment_mask = frame_alignment - 1;
    if (frame_alignment > kPointerSize) {
      ASSERT(IsPowerOf2(frame_alignment));
      Label alignment_as_expected;
      And(at, sp, Operand(frame_alignment_mask));
      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
      // Don't use Check here, as it will call Runtime_Abort possibly
      // re-entering here.
      stop("Unexpected alignment in CallCFunction");
      bind(&alignment_as_expected);
    }
  }
#endif  // V8_HOST_ARCH_MIPS

  // Just call directly. The function called cannot cause a GC, or
  // allow preemption, so the return address in the link register
  // stays correct.

  if (function.is(no_reg)) {
    function = t9;
    li(function, Operand(function_reference));
  } else if (!function.is(t9)) {
    mov(t9, function);
    function = t9;
  }

  Call(function);

  int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
                                0 : num_arguments - kRegisterPassedArguments) +
                               kCArgSlotCount;

  if (OS::ActivationFrameAlignment() > kPointerSize) {
    lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}


#undef BRANCH_ARGS_CHECK


void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  lw(descriptors,
     FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
  Label not_smi;
  JumpIfNotSmi(descriptors, &not_smi);
  li(descriptors, Operand(FACTORY->empty_descriptor_array()));
  bind(&not_smi);
}


CodePatcher::CodePatcher(byte* address, int instructions)
    : address_(address),
      instructions_(instructions),
      size_(instructions * Assembler::kInstrSize),
      masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate
  // size bytes of instructions without failing with buffer size constraints.
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  CPU::FlushICache(address_, size_);

  // Check that the code was patched as expected.
  ASSERT(masm_.pc_ == address_ + size_);
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


void CodePatcher::Emit(Instr instr) {
  masm()->emit(instr);
}


void CodePatcher::Emit(Address addr) {
  masm()->emit(reinterpret_cast<Instr>(addr));
}


void CodePatcher::ChangeBranchCondition(Condition cond) {
  Instr instr = Assembler::instr_at(masm_.pc_);
  ASSERT(Assembler::IsBranch(instr));
  uint32_t opcode = Assembler::GetOpcodeField(instr);
  // Currently only the 'eq' and 'ne' cond values are supported, and only the
  // simple branch instructions (whose opcode encodes the branch type).
  // There are some special cases (see Assembler::IsBranch()) so extending
  // this would be tricky.
  ASSERT(opcode == BEQ ||
         opcode == BNE ||
         opcode == BLEZ ||
         opcode == BGTZ ||
         opcode == BEQL ||
         opcode == BNEL ||
         opcode == BLEZL ||
         opcode == BGTZL);
  opcode = (cond == eq) ? BEQ : BNE;
  instr = (instr & ~kOpcodeMask) | opcode;
  masm_.emit(instr);
}
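

// Example (illustrative): patching `beq rs, rt, offset` into
// `bne rs, rt, offset` rewrites only the 6-bit opcode field in bits 31..26;
// the rs, rt and 16-bit offset fields are untouched by the mask above.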


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS