// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <limits.h>  // For LONG_MIN, LONG_MAX.

#include "v8.h"

#if defined(V8_TARGET_ARCH_MIPS)

#include "bootstrapper.h"
#include "codegen.h"
#include "debug.h"
#include "runtime.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      allow_stub_calls_(true),
      has_frame_(false) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}


void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index) {
  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Condition cond,
                              Register src1, const Operand& src2) {
  Branch(2, NegateCondition(cond), src1, src2);
  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index) {
  sw(source, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index,
                               Condition cond,
                               Register src1, const Operand& src2) {
  Branch(2, NegateCondition(cond), src1, src2);
  sw(source, MemOperand(s6, index << kPointerSizeLog2));
}


// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of kNumSafepointRegisters values on the
  // stack, so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  ASSERT(num_unsaved >= 0);
  if (num_unsaved > 0) {
    Subu(sp, sp, Operand(num_unsaved * kPointerSize));
  }
  MultiPush(kSafepointSavedRegisters);
}


void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  MultiPop(kSafepointSavedRegisters);
  if (num_unsaved > 0) {
    Addu(sp, sp, Operand(num_unsaved * kPointerSize));
  }
}


void MacroAssembler::PushSafepointRegistersAndDoubles() {
  PushSafepointRegisters();
  Subu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i += 2) {
    FPURegister reg = FPURegister::FromAllocationIndex(i);
    sdc1(reg, MemOperand(sp, i * kDoubleSize));
  }
}


void MacroAssembler::PopSafepointRegistersAndDoubles() {
  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i += 2) {
    FPURegister reg = FPURegister::FromAllocationIndex(i);
    ldc1(reg, MemOperand(sp, i * kDoubleSize));
  }
  Addu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
  PopSafepointRegisters();
}


void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
                                                             Register dst) {
  sw(src, SafepointRegistersAndDoublesSlot(dst));
}


void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  sw(src, SafepointRegisterSlot(dst));
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  lw(dst, SafepointRegisterSlot(src));
}


int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.
  return kSafepointRegisterStackIndexMap[reg_code];
}


MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}


MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
  UNIMPLEMENTED_MIPS();
  // General purpose registers are pushed last on the stack.
  int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
  return MemOperand(sp, doubles_size + register_offset);
}


void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cc,
                                Label* branch) {
  ASSERT(cc == eq || cc == ne);
  And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
  Branch(branch, cc, scratch,
         Operand(ExternalReference::new_space_start(isolate())));
}


void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register dst,
    RAStatus ra_status,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check) {
  ASSERT(!AreAliased(value, dst, t8, object));
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  ASSERT(IsAligned(offset, kPointerSize));

  Addu(dst, object, Operand(offset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
    Branch(&ok, eq, t8, Operand(zero_reg));
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  RecordWrite(object,
              dst,
              value,
              ra_status,
              save_fp,
              remembered_set_action,
              OMIT_SMI_CHECK);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(value, Operand(BitCast<int32_t>(kZapValue + 4)));
    li(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
  }
}


// Will clobber the registers 'object', 'address', and 'value'. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
                                 Register address,
                                 Register value,
                                 RAStatus ra_status,
                                 SaveFPRegsMode fp_mode,
                                 RememberedSetAction remembered_set_action,
                                 SmiCheck smi_check) {
  ASSERT(!AreAliased(object, address, value, t8));
  ASSERT(!AreAliased(object, address, value, t9));
  // The compiled code assumes that record write doesn't change the
  // context register, so we check that none of the clobbered
  // registers are cp.
  ASSERT(!address.is(cp) && !value.is(cp));

  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    ASSERT_EQ(0, kSmiTag);
    JumpIfSmi(value, &done);
  }

  CheckPageFlag(value,
                value,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask,
                eq,
                &done);
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                eq,
                &done);

  // Record the actual write.
  if (ra_status == kRAHasNotBeenSaved) {
    push(ra);
  }
  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
  CallStub(&stub);
  if (ra_status == kRAHasNotBeenSaved) {
    pop(ra);
  }

  bind(&done);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(address, Operand(BitCast<int32_t>(kZapValue + 12)));
    li(value, Operand(BitCast<int32_t>(kZapValue + 16)));
  }
}


void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register address,
                                         Register scratch,
                                         SaveFPRegsMode fp_mode,
                                         RememberedSetFinalAction and_then) {
  Label done;
  if (FLAG_debug_code) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok);
    stop("Remembered set pointer is in new space");
    bind(&ok);
  }
  // Load store buffer top.
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  li(t8, Operand(store_buffer));
  lw(scratch, MemOperand(t8));
  // Store pointer to buffer and increment buffer top.
  sw(address, MemOperand(scratch));
  Addu(scratch, scratch, kPointerSize);
  // Write back new top of buffer.
  sw(scratch, MemOperand(t8));
  // Call stub on end of buffer.
  // Check for end of buffer.
  And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
  if (and_then == kFallThroughAtEnd) {
    Branch(&done, eq, t8, Operand(zero_reg));
  } else {
    ASSERT(and_then == kReturnAtEnd);
    Ret(eq, t8, Operand(zero_reg));
  }
  push(ra);
  StoreBufferOverflowStub store_buffer_overflow =
      StoreBufferOverflowStub(fp_mode);
  CallStub(&store_buffer_overflow);
  pop(ra);
  bind(&done);
  if (and_then == kReturnAtEnd) {
    Ret();
  }
}


// -----------------------------------------------------------------------------
// Allocation support.


void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch,
                                            Label* miss) {
  Label same_contexts;

  ASSERT(!holder_reg.is(scratch));
  ASSERT(!holder_reg.is(at));
  ASSERT(!scratch.is(at));

  // Load current lexical context from the stack frame.
  lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
  // In debug mode, make sure the lexical context is set.
#ifdef DEBUG
  Check(ne, "we should not have an empty lexical context",
        scratch, Operand(zero_reg));
#endif

  // Load the global context of the current context.
  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
  lw(scratch, FieldMemOperand(scratch, offset));
  lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));

  // Check the context is a global context.
  if (emit_debug_code()) {
    // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
    push(holder_reg);  // Temporarily save holder on the stack.
    // Read the first word and compare to the global_context_map.
    lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
    LoadRoot(at, Heap::kGlobalContextMapRootIndex);
    Check(eq, "JSGlobalObject::global_context should be a global context.",
          holder_reg, Operand(at));
    pop(holder_reg);  // Restore holder.
  }

  // Check if both contexts are the same.
  lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
  Branch(&same_contexts, eq, scratch, Operand(at));

  // Check the context is a global context.
  if (emit_debug_code()) {
    // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
    push(holder_reg);  // Temporarily save holder on the stack.
    mov(holder_reg, at);  // Move at to its holding place.
    LoadRoot(at, Heap::kNullValueRootIndex);
    Check(ne, "JSGlobalProxy::context() should not be null.",
          holder_reg, Operand(at));

    lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
    LoadRoot(at, Heap::kGlobalContextMapRootIndex);
    Check(eq, "JSGlobalObject::global_context should be a global context.",
          holder_reg, Operand(at));
    // Restoring at is not needed. at is reloaded below.
    pop(holder_reg);  // Restore holder.
    // Restore at to holder's context.
    lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
  }

  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.
  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;

  lw(scratch, FieldMemOperand(scratch, token_offset));
  lw(at, FieldMemOperand(at, token_offset));
  Branch(miss, ne, scratch, Operand(at));

  bind(&same_contexts);
}


void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                              Register elements,
                                              Register key,
                                              Register result,
                                              Register reg0,
                                              Register reg1,
                                              Register reg2) {
  // Register use:
  //
  // elements - holds the slow-case elements of the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'key' or 'result'.
  //            Unchanged on bailout so 'key' or 'result' can be used
  //            in further computation.
  //
  // Scratch registers:
  //
  // reg0 - holds the untagged key on entry and holds the hash once computed.
  //
  // reg1 - Used to hold the capacity mask of the dictionary.
  //
  // reg2 - Used for the index into the dictionary.
  // at   - Temporary (avoid MacroAssembler instructions also using 'at').
  Label done;

  // Compute the hash code from the untagged key. This must be kept in sync
  // with ComputeIntegerHash in utils.h.
  //
  // hash = ~hash + (hash << 15);
  nor(reg1, reg0, zero_reg);
  sll(at, reg0, 15);
  addu(reg0, reg1, at);

  // hash = hash ^ (hash >> 12);
  srl(at, reg0, 12);
  xor_(reg0, reg0, at);

  // hash = hash + (hash << 2);
  sll(at, reg0, 2);
  addu(reg0, reg0, at);

  // hash = hash ^ (hash >> 4);
  srl(at, reg0, 4);
  xor_(reg0, reg0, at);

  // hash = hash * 2057;
  li(reg1, Operand(2057));
  mul(reg0, reg0, reg1);

  // hash = hash ^ (hash >> 16);
  srl(at, reg0, 16);
  xor_(reg0, reg0, at);
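
  // For reference, the sequence above is (a sketch of) the C equivalent of
  // ComputeIntegerHash from utils.h; the exact upstream helper may differ in
  // naming and signature:
  //
  //   uint32_t ComputeIntegerHash(uint32_t key) {
  //     uint32_t hash = key;
  //     hash = ~hash + (hash << 15);
  //     hash = hash ^ (hash >> 12);
  //     hash = hash + (hash << 2);
  //     hash = hash ^ (hash >> 4);
  //     hash = hash * 2057;  // 2057 == 1 + (1 << 3) + (1 << 11).
  //     hash = hash ^ (hash >> 16);
  //     return hash;
  //   }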

  // Compute the capacity mask.
  lw(reg1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
  sra(reg1, reg1, kSmiTagSize);
  Subu(reg1, reg1, Operand(1));

  // Generate an unrolled loop that performs a few probes before giving up.
  static const int kProbes = 4;
  for (int i = 0; i < kProbes; i++) {
    // Use reg2 for index calculations and keep the hash intact in reg0.
    mov(reg2, reg0);
    // Compute the masked index: (hash + i + i * i) & mask.
    if (i > 0) {
      Addu(reg2, reg2, Operand(NumberDictionary::GetProbeOffset(i)));
    }
    and_(reg2, reg2, reg1);

    // Scale the index by multiplying by the element size.
    ASSERT(NumberDictionary::kEntrySize == 3);
    sll(at, reg2, 1);  // 2x.
    addu(reg2, reg2, at);  // reg2 = reg2 * 3.

    // Check if the key is identical to the name.
    sll(at, reg2, kPointerSizeLog2);
    addu(reg2, elements, at);

    lw(at, FieldMemOperand(reg2, NumberDictionary::kElementsStartOffset));
    if (i != kProbes - 1) {
      Branch(&done, eq, key, Operand(at));
    } else {
      Branch(miss, ne, key, Operand(at));
    }
  }

  bind(&done);
  // Check that the value is a normal property.
  // reg2: elements + (index * kPointerSize).
  const int kDetailsOffset =
      NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
  And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
  Branch(miss, ne, at, Operand(zero_reg));

  // Get the value at the masked, scaled index and return.
  const int kValueOffset =
      NumberDictionary::kElementsStartOffset + kPointerSize;
  lw(result, FieldMemOperand(reg2, kValueOffset));
}


// ---------------------------------------------------------------------------
// Instruction macros.

void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    addu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      addiu(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      addu(rd, rs, at);
    }
  }
}
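
// A note on the pattern above (and in the macros below): the capitalized
// macro instructions accept an Operand that may be either a register or an
// immediate. Immediates that fit the native immediate-form instruction are
// encoded directly; anything else is first materialized into 'at' with li(),
// which is why the macros assert !rs.is(at). An illustrative expansion (an
// assumed example, not code from this file):
//
//   Addu(a0, a1, Operand(0x12345678));
//     // expands to:
//     //   lui   at, 0x1234
//     //   ori   at, at, 0x5678
//     //   addu  a0, a1, at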


void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    subu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      addiu(rd, rs, -rt.imm32_);  // No subiu instr, use addiu(x, y, -imm).
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      subu(rd, rs, at);
    }
  }
}


void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    mul(rd, rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    mul(rd, rs, at);
  }
}


void MacroAssembler::Mult(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    mult(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    mult(rs, at);
  }
}


void MacroAssembler::Multu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    multu(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    multu(rs, at);
  }
}


void MacroAssembler::Div(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    div(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    div(rs, at);
  }
}


void MacroAssembler::Divu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    divu(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    divu(rs, at);
  }
}


void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    and_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      andi(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      and_(rd, rs, at);
    }
  }
}


void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    or_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      ori(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      or_(rd, rs, at);
    }
  }
}


void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    xor_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      xori(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      xor_(rd, rs, at);
    }
  }
}


void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    nor(rd, rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    nor(rd, rs, at);
  }
}


void MacroAssembler::Neg(Register rs, const Operand& rt) {
  ASSERT(rt.is_reg());
  ASSERT(!at.is(rs));
  ASSERT(!at.is(rt.rm()));
  li(at, -1);
  xor_(rs, rt.rm(), at);
}


void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    slt(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      slti(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      slt(rd, rs, at);
    }
  }
}


void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    sltu(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      sltiu(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      sltu(rd, rs, at);
    }
  }
}


void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
  if (mips32r2) {
    if (rt.is_reg()) {
      rotrv(rd, rs, rt.rm());
    } else {
      rotr(rd, rs, rt.imm32_);
    }
  } else {
    if (rt.is_reg()) {
      subu(at, zero_reg, rt.rm());
      sllv(at, rs, at);
      srlv(rd, rs, rt.rm());
      or_(rd, rd, at);
    } else {
      if (rt.imm32_ == 0) {
        srl(rd, rs, 0);
      } else {
        srl(at, rs, rt.imm32_);
        sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
        or_(rd, rd, at);
      }
    }
  }
}
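
// Sketch of the pre-R2 fallback above: a rotate right by n is synthesized as
// (x >> n) | (x << (32 - n)). For example, with rt.imm32_ == 8:
//
//   srl  at, rs, 8     // at = x >> 8
//   sll  rd, rs, 24    // rd = x << 24
//   or   rd, rd, at    // rd = rotr(x, 8)
//
// In the register variant, subu(at, zero_reg, rt.rm()) computes -n, which
// sllv interprets modulo 32, i.e. as (32 - n) & 0x1f.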


//------------Pseudo-instructions-------------

void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
  ASSERT(!j.is_reg());
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (!MustUseReg(j.rmode_) && !gen2instr) {
    // Normal load of an immediate value which does not need Relocation Info.
    if (is_int16(j.imm32_)) {
      addiu(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & kHiMask)) {
      ori(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & kImm16Mask)) {
      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
    } else {
      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
      ori(rd, rd, (j.imm32_ & kImm16Mask));
    }
  } else if (MustUseReg(j.rmode_) || gen2instr) {
    if (MustUseReg(j.rmode_)) {
      RecordRelocInfo(j.rmode_, j.imm32_);
    }
    // We always need the same number of instructions here, as this code may
    // be patched to load another value, which may need 2 instructions.
    lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
    ori(rd, rd, (j.imm32_ & kImm16Mask));
  }
}
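
// A worked example of the fast paths above (an illustration, not code from
// the original source), assuming kHiMask == 0xffff0000 and
// kImm16Mask == 0x0000ffff:
//
//   li(t1, Operand(-5));          // fits int16     -> addiu t1, zero_reg, -5
//   li(t1, Operand(0x0000abcd));  // no high bits   -> ori   t1, zero_reg, 0xabcd
//   li(t1, Operand(0x12340000));  // no low bits    -> lui   t1, 0x1234
//   li(t1, Operand(0x1234abcd));  // general case   -> lui + ori
//
// Relocatable values (and gen2instr) always take the two-instruction form so
// that the patcher can rewrite the full 32 bits in place.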


void MacroAssembler::MultiPush(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kPointerSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      sw(ToRegister(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPushReversed(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kPointerSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      sw(ToRegister(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPop(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      lw(ToRegister(i), MemOperand(sp, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  addiu(sp, sp, stack_offset);
}


void MacroAssembler::MultiPopReversed(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      lw(ToRegister(i), MemOperand(sp, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  addiu(sp, sp, stack_offset);
}


void MacroAssembler::MultiPushFPU(RegList regs) {
  CpuFeatures::Scope scope(FPU);
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kDoubleSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kDoubleSize;
      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPushReversedFPU(RegList regs) {
  CpuFeatures::Scope scope(FPU);
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kDoubleSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kDoubleSize;
      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPopFPU(RegList regs) {
  CpuFeatures::Scope scope(FPU);
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
  addiu(sp, sp, stack_offset);
}


void MacroAssembler::MultiPopReversedFPU(RegList regs) {
  CpuFeatures::Scope scope(FPU);
  int16_t stack_offset = 0;

  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
  addiu(sp, sp, stack_offset);
}


void MacroAssembler::FlushICache(Register address, unsigned instructions) {
  RegList saved_regs = kJSCallerSaved | ra.bit();
  MultiPush(saved_regs);
  AllowExternalCallThatCantCauseGC scope(this);

  // Save to a0 in case address == t0.
  Move(a0, address);
  PrepareCallCFunction(2, t0);

  li(a1, instructions * kInstrSize);
  CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
  MultiPop(saved_regs);
}


void MacroAssembler::Ext(Register rt,
                         Register rs,
                         uint16_t pos,
                         uint16_t size) {
  ASSERT(pos < 32);
  ASSERT(pos + size < 33);

  if (mips32r2) {
    ext_(rt, rs, pos, size);
  } else {
    // Move rs to rt and shift it left then right to get the
    // desired bitfield on the right side and zeroes on the left.
    int shift_left = 32 - (pos + size);
    sll(rt, rs, shift_left);  // Acts as a move if shift_left == 0.

    int shift_right = 32 - size;
    if (shift_right > 0) {
      srl(rt, rt, shift_right);
    }
  }
}


void MacroAssembler::Ins(Register rt,
                         Register rs,
                         uint16_t pos,
                         uint16_t size) {
  ASSERT(pos < 32);
  ASSERT(pos + size <= 32);
  ASSERT(size != 0);

  if (mips32r2) {
    ins_(rt, rs, pos, size);
  } else {
    ASSERT(!rt.is(t8) && !rs.is(t8));
    Subu(at, zero_reg, Operand(1));
    srl(at, at, 32 - size);
    and_(t8, rs, at);
    sll(t8, t8, pos);
    sll(at, at, pos);
    nor(at, at, zero_reg);
    and_(at, rt, at);
    or_(rt, t8, at);
  }
}
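
// Walkthrough of the pre-R2 Ins fallback, as a sketch with pos = 8, size = 4:
//
//   Subu(at, zero_reg, Operand(1));  // at = 0xffffffff
//   srl(at, at, 32 - 4);             // at = 0x0000000f  (field mask)
//   and_(t8, rs, at);                // t8 = rs & 0xf    (new field bits)
//   sll(t8, t8, 8);                  // t8 = field << pos
//   sll(at, at, 8);                  // at = 0x00000f00  (mask << pos)
//   nor(at, at, zero_reg);           // at = ~0x00000f00 (hole mask)
//   and_(at, rt, at);                // clear the field in rt
//   or_(rt, t8, at);                 // insert the new field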


void MacroAssembler::Cvt_d_uw(FPURegister fd,
                              FPURegister fs,
                              FPURegister scratch) {
  // Move the data from fs to t8.
  mfc1(t8, fs);
  Cvt_d_uw(fd, t8, scratch);
}


void MacroAssembler::Cvt_d_uw(FPURegister fd,
                              Register rs,
                              FPURegister scratch) {
  // Convert rs to a FP value in fd (and fd + 1).
  // We do this by converting rs minus the MSB to avoid sign conversion,
  // then adding 2^31 to the result (if needed).

  ASSERT(!fd.is(scratch));
  ASSERT(!rs.is(t9));
  ASSERT(!rs.is(at));

  // Save rs's MSB to t9.
  Ext(t9, rs, 31, 1);
  // Remove rs's MSB.
  Ext(at, rs, 0, 31);
  // Move the result to fd.
  mtc1(at, fd);

  // Convert fd to a real FP value.
  cvt_d_w(fd, fd);

  Label conversion_done;

  // If rs's MSB was 0, it's done.
  // Otherwise we need to add that to the FP register.
  Branch(&conversion_done, eq, t9, Operand(zero_reg));

  // Load 2^31 into scratch as its double representation.
  li(at, 0x41E00000);
  mtc1(at, FPURegister::from_code(scratch.code() + 1));
  mtc1(zero_reg, scratch);
  // Add it to fd.
  add_d(fd, fd, scratch);

  bind(&conversion_done);
}
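
// Note on the magic constant: 0x41E00000 is the high word of the IEEE-754
// double representation of 2^31 (2^31 == 0x41E0000000000000), so writing it
// to the high half of the register pair and zero to the low half materializes
// the double 2147483648.0 without a memory load.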


void MacroAssembler::Trunc_uw_d(FPURegister fd,
                                FPURegister fs,
                                FPURegister scratch) {
  Trunc_uw_d(fs, t8, scratch);
  mtc1(t8, fd);
}


void MacroAssembler::Trunc_uw_d(FPURegister fd,
                                Register rs,
                                FPURegister scratch) {
  ASSERT(!fd.is(scratch));
  ASSERT(!rs.is(at));

  // Load 2^31 into scratch as its double representation.
  li(at, 0x41E00000);
  mtc1(at, FPURegister::from_code(scratch.code() + 1));
  mtc1(zero_reg, scratch);
  // Test if scratch > fd.
  // If fd < 2^31 we can convert it normally.
  Label simple_convert;
  BranchF(&simple_convert, NULL, lt, fd, scratch);

  // First we subtract 2^31 from fd, then trunc it to rs
  // and add 2^31 to rs.
  sub_d(scratch, fd, scratch);
  trunc_w_d(scratch, scratch);
  mfc1(rs, scratch);
  Or(rs, rs, 1 << 31);

  Label done;
  Branch(&done);
  // Simple conversion.
  bind(&simple_convert);
  trunc_w_d(scratch, fd);
  mfc1(rs, scratch);

  bind(&done);
}


void MacroAssembler::BranchF(Label* target,
                             Label* nan,
                             Condition cc,
                             FPURegister cmp1,
                             FPURegister cmp2,
                             BranchDelaySlot bd) {
  if (cc == al) {
    Branch(bd, target);
    return;
  }

  ASSERT(nan || target);
  // Check for unordered (NaN) cases.
  if (nan) {
    c(UN, D, cmp1, cmp2);
    bc1t(nan);
  }

  if (target) {
    // Here NaN cases were either handled by this function or are assumed to
    // have been handled by the caller.
    // Unsigned conditions are treated as their signed counterpart.
    switch (cc) {
      case Uless:
      case less:
        c(OLT, D, cmp1, cmp2);
        bc1t(target);
        break;
      case Ugreater:
      case greater:
        c(ULE, D, cmp1, cmp2);
        bc1f(target);
        break;
      case Ugreater_equal:
      case greater_equal:
        c(ULT, D, cmp1, cmp2);
        bc1f(target);
        break;
      case Uless_equal:
      case less_equal:
        c(OLE, D, cmp1, cmp2);
        bc1t(target);
        break;
      case eq:
        c(EQ, D, cmp1, cmp2);
        bc1t(target);
        break;
      case ne:
        c(EQ, D, cmp1, cmp2);
        bc1f(target);
        break;
      default:
        CHECK(0);
    };
  }

  if (bd == PROTECT) {
    nop();
  }
}
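
// Why 'greater' is encoded as c(ULE) + bc1f rather than a direct ordered
// greater-than test: the MIPS c.cond.d family only provides <, <= and ==
// style predicates, so a > b is tested as !(a <= b || unordered). With
// c(ULE) the condition bit is also set for NaN operands, so bc1f falls
// through on NaN, matching the contract above that NaN was already routed
// to 'nan' (or handled by the caller).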


void MacroAssembler::Move(FPURegister dst, double imm) {
  ASSERT(CpuFeatures::IsEnabled(FPU));
  static const DoubleRepresentation minus_zero(-0.0);
  static const DoubleRepresentation zero(0.0);
  DoubleRepresentation value(imm);
  // Handle special values first.
  bool force_load = dst.is(kDoubleRegZero);
  if (value.bits == zero.bits && !force_load) {
    mov_d(dst, kDoubleRegZero);
  } else if (value.bits == minus_zero.bits && !force_load) {
    neg_d(dst, kDoubleRegZero);
  } else {
    uint32_t lo, hi;
    DoubleAsTwoUInt32(imm, &lo, &hi);
    // Move the low part of the double into the lower half of the
    // corresponding FPU register pair.
    if (lo != 0) {
      li(at, Operand(lo));
      mtc1(at, dst);
    } else {
      mtc1(zero_reg, dst);
    }
    // Move the high part of the double into the upper half of the
    // corresponding FPU register pair.
    if (hi != 0) {
      li(at, Operand(hi));
      mtc1(at, dst.high());
    } else {
      mtc1(zero_reg, dst.high());
    }
  }
}
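
// For example (an illustration of DoubleAsTwoUInt32, not code from this
// file): 1.0 has the IEEE-754 bit pattern 0x3FF0000000000000, so Move()
// would li/mtc1 hi == 0x3FF00000 into dst.high() and write zero_reg into
// the low half; loading 0.0 or -0.0 instead reuses kDoubleRegZero.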


// Tries to get a signed int32 out of a double precision floating point heap
// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
// 32bits signed integer range.
// This method implementation differs from the ARM version for performance
// reasons.
void MacroAssembler::ConvertToInt32(Register source,
                                    Register dest,
                                    Register scratch,
                                    Register scratch2,
                                    FPURegister double_scratch,
                                    Label* not_int32) {
  Label right_exponent, done;
  // Get exponent word (ENDIAN issues).
  lw(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
  // Get exponent alone in scratch2.
  And(scratch2, scratch, Operand(HeapNumber::kExponentMask));
  // Load dest with zero. We use this either for the final shift or
  // for the answer.
  mov(dest, zero_reg);
  // Check whether the exponent matches a 32 bit signed int that is not a Smi.
  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
  // the exponent that we are fastest at and also the highest exponent we can
  // handle here.
  const uint32_t non_smi_exponent =
      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
  // If we have a match of the int32-but-not-Smi exponent then skip some logic.
  Branch(&right_exponent, eq, scratch2, Operand(non_smi_exponent));
  // If the exponent is higher than that then go to not_int32 case. This
  // catches numbers that don't fit in a signed int32, infinities and NaNs.
  Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));

  // We know the exponent is smaller than 30 (biased). If it is less than
  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
  // it rounds to zero.
  const uint32_t zero_exponent =
      (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
  Subu(scratch2, scratch2, Operand(zero_exponent));
  // Dest already has a Smi zero.
  Branch(&done, lt, scratch2, Operand(zero_reg));
  if (!CpuFeatures::IsSupported(FPU)) {
    // We have a shifted exponent between 0 and 30 in scratch2.
    srl(dest, scratch2, HeapNumber::kExponentShift);
    // We now have the exponent in dest. Subtract from 30 to get
    // how much to shift down.
    li(at, Operand(30));
    subu(dest, at, dest);
  }
  bind(&right_exponent);
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // MIPS FPU instructions implementing double precision to integer
    // conversion using round to zero. Since the FP value was qualified
    // above, the resulting integer should be a legal int32.
    // The original 'Exponent' word is still in scratch.
    lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
    mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
    trunc_w_d(double_scratch, double_scratch);
    mfc1(dest, double_scratch);
  } else {
    // On entry, dest has final downshift, scratch has original sign/exp/mant.
    // Save sign bit in top bit of dest.
    And(scratch2, scratch, Operand(0x80000000));
    Or(dest, dest, Operand(scratch2));
    // Put back the implicit 1, just above mantissa field.
    Or(scratch, scratch, Operand(1 << HeapNumber::kExponentShift));

    // Shift up the mantissa bits to take up the space the exponent used to
    // take. We just orred in the implicit bit so that took care of one and
    // we want to leave the sign bit 0 so we subtract 2 bits from the shift
    // distance. But we want to clear the sign-bit so shift one more bit
    // left, then shift right one bit.
    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
    sll(scratch, scratch, shift_distance + 1);
    srl(scratch, scratch, 1);

    // Get the second half of the double. For some exponents we don't
    // actually need this because the bits get shifted out again, but
    // it's probably slower to test than just to do it.
    lw(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
    // Extract the top 10 bits, and insert those bottom 10 bits of scratch.
    // The width of the field here is the same as the shift amount above.
    const int field_width = shift_distance;
    Ext(scratch2, scratch2, 32 - shift_distance, field_width);
    Ins(scratch, scratch2, 0, field_width);
    // Move down according to the exponent.
    srlv(scratch, scratch, dest);
    // Prepare the negative version of our integer.
    subu(scratch2, zero_reg, scratch);
    // Trick to check sign bit (msb) held in dest, count leading zero.
    // 0 indicates negative, save negative version with conditional move.
    clz(dest, dest);
    movz(scratch, scratch2, dest);
    mov(dest, scratch);
  }
  bind(&done);
}


void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
                                     FPURegister result,
                                     DoubleRegister double_input,
                                     Register scratch1,
                                     Register except_flag,
                                     CheckForInexactConversion check_inexact) {
  ASSERT(CpuFeatures::IsSupported(FPU));
  CpuFeatures::Scope scope(FPU);

  int32_t except_mask = kFCSRFlagMask;  // Assume interested in all exceptions.

  if (check_inexact == kDontCheckForInexactConversion) {
    // Ignore inexact exceptions.
    except_mask &= ~kFCSRInexactFlagMask;
  }

  // Save FCSR.
  cfc1(scratch1, FCSR);
  // Disable FPU exceptions.
  ctc1(zero_reg, FCSR);

  // Do operation based on rounding mode.
  switch (rounding_mode) {
    case kRoundToNearest:
      round_w_d(result, double_input);
      break;
    case kRoundToZero:
      trunc_w_d(result, double_input);
      break;
    case kRoundToPlusInf:
      ceil_w_d(result, double_input);
      break;
    case kRoundToMinusInf:
      floor_w_d(result, double_input);
      break;
  }  // End of switch-statement.

  // Retrieve FCSR.
  cfc1(except_flag, FCSR);
  // Restore FCSR.
  ctc1(scratch1, FCSR);

  // Check for fpu exceptions.
  And(except_flag, except_flag, Operand(except_mask));
}
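
// Typical use, sketched under the assumption of caller-chosen registers and
// a caller-defined bailout label (this usage is not part of this file):
// convert, then branch away if any unmasked FCSR exception bit was raised,
// i.e. if the conversion result cannot be trusted.
//
//   Label not_int32;
//   EmitFPUTruncate(kRoundToZero,
//                   single_scratch,
//                   double_input,
//                   scratch1,
//                   except_flag,
//                   kDontCheckForInexactConversion);
//   Branch(&not_int32, ne, except_flag, Operand(zero_reg));
//   mfc1(result, single_scratch);  // Safe to use the truncated value.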


void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
                                                 Register input_high,
                                                 Register input_low,
                                                 Register scratch) {
  Label done, normal_exponent, restore_sign;
  // Extract the biased exponent in result.
  Ext(result,
      input_high,
      HeapNumber::kExponentShift,
      HeapNumber::kExponentBits);

  // Check for Infinity and NaNs, which should return 0.
  Subu(scratch, result, HeapNumber::kExponentMask);
  movz(result, zero_reg, scratch);
  Branch(&done, eq, scratch, Operand(zero_reg));

  // Express exponent as delta to (number of mantissa bits + 31).
  Subu(result,
       result,
       Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));

  // If the delta is strictly positive, all bits would be shifted away,
  // which means that we can return 0.
  Branch(&normal_exponent, le, result, Operand(zero_reg));
  mov(result, zero_reg);
  Branch(&done);

  bind(&normal_exponent);
  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
  // Calculate shift.
  Addu(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits));

  // Save the sign.
  Register sign = result;
  result = no_reg;
  And(sign, input_high, Operand(HeapNumber::kSignMask));

  // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
  // to check for this specific case.
  Label high_shift_needed, high_shift_done;
  Branch(&high_shift_needed, lt, scratch, Operand(32));
  mov(input_high, zero_reg);
  Branch(&high_shift_done);
  bind(&high_shift_needed);

  // Set the implicit 1 before the mantissa part in input_high.
  Or(input_high,
     input_high,
     Operand(1 << HeapNumber::kMantissaBitsInTopWord));
  // Shift the mantissa bits to the correct position.
  // We don't need to clear non-mantissa bits as they will be shifted away.
  // If they weren't, it would mean that the answer is in the 32bit range.
  sllv(input_high, input_high, scratch);

  bind(&high_shift_done);

  // Replace the shifted bits with bits from the lower mantissa word.
  Label pos_shift, shift_done;
  li(at, 32);
  subu(scratch, at, scratch);
  Branch(&pos_shift, ge, scratch, Operand(zero_reg));

  // Negate scratch.
  Subu(scratch, zero_reg, scratch);
  sllv(input_low, input_low, scratch);
  Branch(&shift_done);

  bind(&pos_shift);
  srlv(input_low, input_low, scratch);

  bind(&shift_done);
  Or(input_high, input_high, Operand(input_low));
  // Restore sign if necessary.
  mov(scratch, sign);
  result = sign;
  sign = no_reg;
  Subu(result, zero_reg, input_high);
  movz(result, input_high, scratch);
  bind(&done);
}


void MacroAssembler::EmitECMATruncate(Register result,
                                      FPURegister double_input,
                                      FPURegister single_scratch,
                                      Register scratch,
                                      Register scratch2,
                                      Register scratch3) {
  CpuFeatures::Scope scope(FPU);
  ASSERT(!scratch2.is(result));
  ASSERT(!scratch3.is(result));
  ASSERT(!scratch3.is(scratch2));
  ASSERT(!scratch.is(result) &&
         !scratch.is(scratch2) &&
         !scratch.is(scratch3));
  ASSERT(!single_scratch.is(double_input));

  Label done;
  Label manual;

  // Clear cumulative exception flags and save the FCSR.
  cfc1(scratch2, FCSR);
  ctc1(zero_reg, FCSR);
  // Try a conversion to a signed integer.
  trunc_w_d(single_scratch, double_input);
  mfc1(result, single_scratch);
  // Retrieve and restore the FCSR.
  cfc1(scratch, FCSR);
  ctc1(scratch2, FCSR);
  // Check for overflow and NaNs.
  And(scratch,
      scratch,
      kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
  // If we had no exceptions we are done.
  Branch(&done, eq, scratch, Operand(zero_reg));

  // Load the double value and perform a manual truncation.
  Register input_high = scratch2;
  Register input_low = scratch3;
  Move(input_low, input_high, double_input);
  EmitOutOfInt32RangeTruncate(result,
                              input_high,
                              input_low,
                              scratch);
  bind(&done);
}
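
// Semantics note (a summary, not normative): this mirrors ECMA-262 ToInt32.
// When trunc_w_d succeeds, the hardware result is used directly; when it
// raises overflow/invalid (value outside int32, Infinity, or NaN),
// EmitOutOfInt32RangeTruncate recomputes the low 32 bits of the integer part
// from the raw mantissa words, which is the "modulo 2^32" behavior ToInt32
// requires, with NaN and Infinity coming out as 0.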


void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                         Register src,
                                         int num_least_bits) {
  Ext(dst, src, kSmiTagSize, num_least_bits);
}


void MacroAssembler::GetLeastBitsFromInt32(Register dst,
                                           Register src,
                                           int num_least_bits) {
  And(dst, src, Operand((1 << num_least_bits) - 1));
}

// Emulated conditional branches do not emit a nop in the branch delay slot.
//
// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT(                                \
    (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) ||         \
    (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))


void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
  BranchShort(offset, bdslot);
}


void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
                            const Operand& rt,
                            BranchDelaySlot bdslot) {
  BranchShort(offset, cond, rs, rt, bdslot);
}


void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
  if (L->is_bound()) {
    if (is_near(L)) {
      BranchShort(L, bdslot);
    } else {
      Jr(L, bdslot);
    }
  } else {
    if (is_trampoline_emitted()) {
      Jr(L, bdslot);
    } else {
      BranchShort(L, bdslot);
    }
  }
}


void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
                            const Operand& rt,
                            BranchDelaySlot bdslot) {
  if (L->is_bound()) {
    if (is_near(L)) {
      BranchShort(L, cond, rs, rt, bdslot);
    } else {
      Label skip;
      Condition neg_cond = NegateCondition(cond);
      BranchShort(&skip, neg_cond, rs, rt);
      Jr(L, bdslot);
      bind(&skip);
    }
  } else {
    if (is_trampoline_emitted()) {
      Label skip;
      Condition neg_cond = NegateCondition(cond);
      BranchShort(&skip, neg_cond, rs, rt);
      Jr(L, bdslot);
      bind(&skip);
    } else {
      BranchShort(L, cond, rs, rt, bdslot);
    }
  }
}


void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
  b(offset);

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}


void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
                                 const Operand& rt,
                                 BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);
  ASSERT(!rs.is(zero_reg));
  Register r2 = no_reg;
  Register scratch = at;

  if (rt.is_reg()) {
    // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs
    // or rt.
    r2 = rt.rm_;
    switch (cond) {
      case cc_always:
        b(offset);
        break;
      case eq:
        beq(rs, r2, offset);
        break;
      case ne:
        bne(rs, r2, offset);
        break;
      // Signed comparison.
      case greater:
        if (r2.is(zero_reg)) {
          bgtz(rs, offset);
        } else {
          slt(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (r2.is(zero_reg)) {
          bgez(rs, offset);
        } else {
          slt(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (r2.is(zero_reg)) {
          bltz(rs, offset);
        } else {
          slt(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (r2.is(zero_reg)) {
          blez(rs, offset);
        } else {
          slt(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (r2.is(zero_reg)) {
          bgtz(rs, offset);
        } else {
          sltu(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (r2.is(zero_reg)) {
          bgez(rs, offset);
        } else {
          sltu(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (r2.is(zero_reg)) {
          // No code needs to be emitted.
          return;
        } else {
          sltu(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (r2.is(zero_reg)) {
          b(offset);
        } else {
          sltu(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  } else {
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching
    // the target.
    switch (cond) {
      case cc_always:
        b(offset);
        break;
      case eq:
        // We don't want any other register but scratch clobbered.
        ASSERT(!scratch.is(rs));
        r2 = scratch;
        li(r2, rt);
        beq(rs, r2, offset);
        break;
      case ne:
        // We don't want any other register but scratch clobbered.
        ASSERT(!scratch.is(rs));
        r2 = scratch;
        li(r2, rt);
        bne(rs, r2, offset);
        break;
      // Signed comparison.
      case greater:
        if (rt.imm32_ == 0) {
          bgtz(rs, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (rt.imm32_ == 0) {
          bgez(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          beq(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (rt.imm32_ == 0) {
          bltz(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          bne(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (rt.imm32_ == 0) {
          blez(rs, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (rt.imm32_ == 0) {
          bgtz(rs, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (rt.imm32_ == 0) {
          bgez(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          beq(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (rt.imm32_ == 0) {
          // No code needs to be emitted.
          return;
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          bne(scratch, zero_reg, offset);
        } else {
          r2 = scratch;
          li(r2, rt);
1671 sltu(scratch, rs, r2);
1672 bne(scratch, zero_reg, offset);
1673 }
1674 break;
1675 case Uless_equal:
1676 if (rt.imm32_ == 0) {
1677 b(offset);
1678 } else {
1679 r2 = scratch;
1680 li(r2, rt);
1681 sltu(scratch, r2, rs);
1682 beq(scratch, zero_reg, offset);
1683 }
1684 break;
1685 default:
1686 UNREACHABLE();
1687 }
Andrei Popescu31002712010-02-23 13:46:05 +00001688 }
Steve Block44f0eee2011-05-26 01:26:41 +01001689 // Emit a nop in the branch delay slot if required.
1690 if (bdslot == PROTECT)
1691 nop();
Andrei Popescu31002712010-02-23 13:46:05 +00001692}
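
// Note: MIPS branch instructions only compare for (in)equality or against
// zero, so the other conditions above are synthesized through 'at'. For
// example, BranchShort(offset, greater, rs, Operand(r2)) emits:
//   slt at, r2, rs
//   bne at, zero_reg, offset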


void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
  // We use branch_offset as an argument for the branch instructions to be sure
  // it is called just before generating the branch instruction, as needed.

  b(shifted_branch_offset(L, false));

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}


void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
                                 const Operand& rt,
                                 BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);

  int32_t offset;
  Register r2 = no_reg;
  Register scratch = at;
  if (rt.is_reg()) {
    r2 = rt.rm_;
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching the
    // target.
    switch (cond) {
      case cc_always:
        offset = shifted_branch_offset(L, false);
        b(offset);
        break;
      case eq:
        offset = shifted_branch_offset(L, false);
        beq(rs, r2, offset);
        break;
      case ne:
        offset = shifted_branch_offset(L, false);
        bne(rs, r2, offset);
        break;
      // Signed comparison.
      case greater:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bgtz(rs, offset);
        } else {
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bgez(rs, offset);
        } else {
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bltz(rs, offset);
        } else {
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          blez(rs, offset);
        } else {
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bgtz(rs, offset);
        } else {
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          bgez(rs, offset);
        } else {
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (r2.is(zero_reg)) {
          // No code needs to be emitted.
          return;
        } else {
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (r2.is(zero_reg)) {
          offset = shifted_branch_offset(L, false);
          b(offset);
        } else {
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  } else {
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching the
    // target.
    switch (cond) {
      case cc_always:
        offset = shifted_branch_offset(L, false);
        b(offset);
        break;
      case eq:
        ASSERT(!scratch.is(rs));
        r2 = scratch;
        li(r2, rt);
        offset = shifted_branch_offset(L, false);
        beq(rs, r2, offset);
        break;
      case ne:
        ASSERT(!scratch.is(rs));
        r2 = scratch;
        li(r2, rt);
        offset = shifted_branch_offset(L, false);
        bne(rs, r2, offset);
        break;
      // Signed comparison.
      case greater:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bgtz(rs, offset);
        } else {
          ASSERT(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bgez(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        } else {
          ASSERT(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bltz(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          slti(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        } else {
          ASSERT(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          slt(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          blez(rs, offset);
        } else {
          ASSERT(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          slt(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bgtz(rs, offset);
        } else {
          ASSERT(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          bgez(rs, offset);
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        } else {
          ASSERT(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (rt.imm32_ == 0) {
          // No code needs to be emitted.
          return;
        } else if (is_int16(rt.imm32_)) {
          sltiu(scratch, rs, rt.imm32_);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        } else {
          ASSERT(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, rs, r2);
          offset = shifted_branch_offset(L, false);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (rt.imm32_ == 0) {
          offset = shifted_branch_offset(L, false);
          b(offset);
        } else {
          ASSERT(!scratch.is(rs));
          r2 = scratch;
          li(r2, rt);
          sltu(scratch, r2, rs);
          offset = shifted_branch_offset(L, false);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  }
  // Check that offset could actually hold in an int16_t.
  ASSERT(is_int16(offset));
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}
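
// In the label-taking variants above, shifted_branch_offset() both computes
// the 16-bit offset and records the branch location so it can be patched once
// the label is bound, which is why it must be called immediately before each
// branch; the final ASSERT(is_int16(offset)) verifies the target really is in
// short-branch range.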


void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
  BranchAndLinkShort(offset, bdslot);
}


void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
                                   const Operand& rt,
                                   BranchDelaySlot bdslot) {
  BranchAndLinkShort(offset, cond, rs, rt, bdslot);
}


void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
  if (L->is_bound()) {
    if (is_near(L)) {
      BranchAndLinkShort(L, bdslot);
    } else {
      Jalr(L, bdslot);
    }
  } else {
    if (is_trampoline_emitted()) {
      Jalr(L, bdslot);
    } else {
      BranchAndLinkShort(L, bdslot);
    }
  }
}


void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
                                   const Operand& rt,
                                   BranchDelaySlot bdslot) {
  if (L->is_bound()) {
    if (is_near(L)) {
      BranchAndLinkShort(L, cond, rs, rt, bdslot);
    } else {
      Label skip;
      Condition neg_cond = NegateCondition(cond);
      BranchShort(&skip, neg_cond, rs, rt);
      Jalr(L, bdslot);
      bind(&skip);
    }
  } else {
    if (is_trampoline_emitted()) {
      Label skip;
      Condition neg_cond = NegateCondition(cond);
      BranchShort(&skip, neg_cond, rs, rt);
      Jalr(L, bdslot);
      bind(&skip);
    } else {
      BranchAndLinkShort(L, cond, rs, rt, bdslot);
    }
  }
}


// We need to use a bgezal or bltzal, but they can't be used directly with the
// slt instructions. We could use sub or add instead but we would miss overflow
// cases, so we keep slt and add an intermediate third instruction.
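// For example, for 'greater' the sequence below is:
//   slt    scratch, r2, rs       // scratch = (rs > r2) ? 1 : 0.
//   addiu  scratch, scratch, -1  // Maps {0, 1} to {-1, 0}.
//   bgezal scratch, offset       // Taken (and links) only when scratch == 0.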
void MacroAssembler::BranchAndLinkShort(int16_t offset,
                                        BranchDelaySlot bdslot) {
  bal(offset);

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}


void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
                                        Register rs, const Operand& rt,
                                        BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);
  Register r2 = no_reg;
  Register scratch = at;

  if (rt.is_reg()) {
    r2 = rt.rm_;
  } else if (cond != cc_always) {
    r2 = scratch;
    li(r2, rt);
  }

  switch (cond) {
    case cc_always:
      bal(offset);
      break;
    case eq:
      bne(rs, r2, 2);
      nop();
      bal(offset);
      break;
    case ne:
      beq(rs, r2, 2);
      nop();
      bal(offset);
      break;

    // Signed comparison.
    case greater:
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      break;
    case greater_equal:
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      break;
    case less:
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      break;
    case less_equal:
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      break;

    // Unsigned comparison.
    case Ugreater:
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      break;
    case Ugreater_equal:
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      break;
    case Uless:
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      break;
    case Uless_equal:
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      break;

    default:
      UNREACHABLE();
  }
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}


void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
  bal(shifted_branch_offset(L, false));

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}


void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
                                        const Operand& rt,
                                        BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);

  int32_t offset;
  Register r2 = no_reg;
  Register scratch = at;
  if (rt.is_reg()) {
    r2 = rt.rm_;
  } else if (cond != cc_always) {
    r2 = scratch;
    li(r2, rt);
  }

  switch (cond) {
    case cc_always:
      offset = shifted_branch_offset(L, false);
      bal(offset);
      break;
    case eq:
      bne(rs, r2, 2);
      nop();
      offset = shifted_branch_offset(L, false);
      bal(offset);
      break;
    case ne:
      beq(rs, r2, 2);
      nop();
      offset = shifted_branch_offset(L, false);
      bal(offset);
      break;

    // Signed comparison.
    case greater:
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bgezal(scratch, offset);
      break;
    case greater_equal:
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bltzal(scratch, offset);
      break;
    case less:
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bgezal(scratch, offset);
      break;
    case less_equal:
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bltzal(scratch, offset);
      break;

    // Unsigned comparison.
    case Ugreater:
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bgezal(scratch, offset);
      break;
    case Ugreater_equal:
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bltzal(scratch, offset);
      break;
    case Uless:
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bgezal(scratch, offset);
      break;
    case Uless_equal:
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      offset = shifted_branch_offset(L, false);
      bltzal(scratch, offset);
      break;

    default:
      UNREACHABLE();
  }

  // Check that offset could actually hold in an int16_t.
  ASSERT(is_int16(offset));

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}


void MacroAssembler::Jump(Register target,
                          Condition cond,
                          Register rs,
                          const Operand& rt,
                          BranchDelaySlot bd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (cond == cc_always) {
    jr(target);
  } else {
    BRANCH_ARGS_CHECK(cond, rs, rt);
    Branch(2, NegateCondition(cond), rs, rt);
    jr(target);
  }
  // Emit a nop in the branch delay slot if required.
  if (bd == PROTECT)
    nop();
}


void MacroAssembler::Jump(intptr_t target,
                          RelocInfo::Mode rmode,
                          Condition cond,
                          Register rs,
                          const Operand& rt,
                          BranchDelaySlot bd) {
  li(t9, Operand(target, rmode));
  Jump(t9, cond, rs, rt, bd);
}


void MacroAssembler::Jump(Address target,
                          RelocInfo::Mode rmode,
                          Condition cond,
                          Register rs,
                          const Operand& rt,
                          BranchDelaySlot bd) {
  ASSERT(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
}


void MacroAssembler::Jump(Handle<Code> code,
                          RelocInfo::Mode rmode,
                          Condition cond,
                          Register rs,
                          const Operand& rt,
                          BranchDelaySlot bd) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
}


int MacroAssembler::CallSize(Register target,
                             Condition cond,
                             Register rs,
                             const Operand& rt,
                             BranchDelaySlot bd) {
  int size = 0;

  if (cond == cc_always) {
    size += 1;
  } else {
    size += 3;
  }

  if (bd == PROTECT)
    size += 1;

  return size * kInstrSize;
}
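
// E.g. an unconditional call with a PROTECT delay slot is jalr + nop,
// i.e. 2 * kInstrSize = 8 bytes on MIPS; the conditional case adds the
// negated branch and its delay slot.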


// Note: To call gcc-compiled C code on MIPS, you must call through t9.
void MacroAssembler::Call(Register target,
                          Condition cond,
                          Register rs,
                          const Operand& rt,
                          BranchDelaySlot bd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Label start;
  bind(&start);
  if (cond == cc_always) {
    jalr(target);
  } else {
    BRANCH_ARGS_CHECK(cond, rs, rt);
    Branch(2, NegateCondition(cond), rs, rt);
    jalr(target);
  }
  // Emit a nop in the branch delay slot if required.
  if (bd == PROTECT)
    nop();

  ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
            SizeOfCodeGeneratedSince(&start));
}


int MacroAssembler::CallSize(Address target,
                             RelocInfo::Mode rmode,
                             Condition cond,
                             Register rs,
                             const Operand& rt,
                             BranchDelaySlot bd) {
  int size = CallSize(t9, cond, rs, rt, bd);
  return size + 2 * kInstrSize;
}
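
// The extra 2 * kInstrSize covers the fixed-size lui/ori pair that
// li(t9, ..., true) emits below to materialize the 32-bit call target.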


void MacroAssembler::Call(Address target,
                          RelocInfo::Mode rmode,
                          Condition cond,
                          Register rs,
                          const Operand& rt,
                          BranchDelaySlot bd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Label start;
  bind(&start);
  int32_t target_int = reinterpret_cast<int32_t>(target);
  // Must record previous source positions before the
  // li() below generates a new code target.
  positions_recorder()->WriteRecordedPositions();
  li(t9, Operand(target_int, rmode), true);
  Call(t9, cond, rs, rt, bd);
  ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
            SizeOfCodeGeneratedSince(&start));
}


int MacroAssembler::CallSize(Handle<Code> code,
                             RelocInfo::Mode rmode,
                             unsigned ast_id,
                             Condition cond,
                             Register rs,
                             const Operand& rt,
                             BranchDelaySlot bd) {
  return CallSize(reinterpret_cast<Address>(code.location()),
                  rmode, cond, rs, rt, bd);
}


void MacroAssembler::Call(Handle<Code> code,
                          RelocInfo::Mode rmode,
                          unsigned ast_id,
                          Condition cond,
                          Register rs,
                          const Operand& rt,
                          BranchDelaySlot bd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Label start;
  bind(&start);
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
    SetRecordedAstId(ast_id);
    rmode = RelocInfo::CODE_TARGET_WITH_ID;
  }
  Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
  ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
            SizeOfCodeGeneratedSince(&start));
}


void MacroAssembler::Ret(Condition cond,
                         Register rs,
                         const Operand& rt,
                         BranchDelaySlot bd) {
  Jump(ra, cond, rs, rt, bd);
}


void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
  BlockTrampolinePoolScope block_trampoline_pool(this);

  uint32_t imm28;
  imm28 = jump_address(L);
  imm28 &= kImm28Mask;
  { BlockGrowBufferScope block_buf_growth(this);
    // Buffer growth (and relocation) must be blocked for internal references
    // until associated instructions are emitted and available to be patched.
    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
    j(imm28);
  }
  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}
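
// J above emits a region-relative j instruction (26-bit index, 28-bit byte
// range), so it can only reach targets inside the current 256 MB region;
// Jr and Jalr below build the full 32-bit address in 'at' instead.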


void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
  BlockTrampolinePoolScope block_trampoline_pool(this);

  uint32_t imm32;
  imm32 = jump_address(L);
  { BlockGrowBufferScope block_buf_growth(this);
    // Buffer growth (and relocation) must be blocked for internal references
    // until associated instructions are emitted and available to be patched.
    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
    lui(at, (imm32 & kHiMask) >> kLuiShift);
    ori(at, at, (imm32 & kImm16Mask));
  }
  jr(at);

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}


void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
  BlockTrampolinePoolScope block_trampoline_pool(this);

  uint32_t imm32;
  imm32 = jump_address(L);
  { BlockGrowBufferScope block_buf_growth(this);
    // Buffer growth (and relocation) must be blocked for internal references
    // until associated instructions are emitted and available to be patched.
    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
    lui(at, (imm32 & kHiMask) >> kLuiShift);
    ori(at, at, (imm32 & kImm16Mask));
  }
  jalr(at);

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}


void MacroAssembler::DropAndRet(int drop,
                                Condition cond,
                                Register r1,
                                const Operand& r2) {
  // This is a workaround to make sure only one branch instruction is
  // generated. It relies on Drop and Ret not creating branches if
  // cond == cc_always.
  Label skip;
  if (cond != cc_always) {
    Branch(&skip, NegateCondition(cond), r1, r2);
  }

  Drop(drop);
  Ret();

  if (cond != cc_always) {
    bind(&skip);
  }
}


void MacroAssembler::Drop(int count,
                          Condition cond,
                          Register reg,
                          const Operand& op) {
  if (count <= 0) {
    return;
  }

  Label skip;

  if (cond != al) {
    Branch(&skip, NegateCondition(cond), reg, op);
  }

  addiu(sp, sp, count * kPointerSize);

  if (cond != al) {
    bind(&skip);
  }
}


void MacroAssembler::Swap(Register reg1,
                          Register reg2,
                          Register scratch) {
  if (scratch.is(no_reg)) {
    Xor(reg1, reg1, Operand(reg2));
    Xor(reg2, reg2, Operand(reg1));
    Xor(reg1, reg1, Operand(reg2));
  } else {
    mov(scratch, reg1);
    mov(reg1, reg2);
    mov(reg2, scratch);
  }
}


void MacroAssembler::Call(Label* target) {
  BranchAndLink(target);
}


void MacroAssembler::Push(Handle<Object> handle) {
  li(at, Operand(handle));
  push(at);
}


#ifdef ENABLE_DEBUGGER_SUPPORT

void MacroAssembler::DebugBreak() {
  mov(a0, zero_reg);
  li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
  CEntryStub ces(1);
  ASSERT(AllowThisStubCall(&ces));
  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}

#endif  // ENABLE_DEBUGGER_SUPPORT


// ---------------------------------------------------------------------------
// Exception handling.

void MacroAssembler::PushTryHandler(CodeLocation try_location,
                                    HandlerType type,
                                    int handler_index) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // For the JSEntry handler, we must preserve a0-a3 and s0.
  // t1-t3 are available. We will build up the handler from the bottom by
  // pushing on the stack. First compute the state.
  unsigned state = StackHandler::OffsetField::encode(handler_index);
  if (try_location == IN_JAVASCRIPT) {
    state |= (type == TRY_CATCH_HANDLER)
        ? StackHandler::KindField::encode(StackHandler::TRY_CATCH)
        : StackHandler::KindField::encode(StackHandler::TRY_FINALLY);
  } else {
    ASSERT(try_location == IN_JS_ENTRY);
    state |= StackHandler::KindField::encode(StackHandler::ENTRY);
  }

  // Set up the code object (t1) and the state (t2) for pushing.
  li(t1, Operand(CodeObject()));
  li(t2, Operand(state));

  // Push the frame pointer, context, state, and code object.
  if (try_location == IN_JAVASCRIPT) {
    MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
  } else {
    ASSERT_EQ(Smi::FromInt(0), 0);
    // The second zero_reg indicates no context.
    // The first zero_reg is the NULL frame pointer.
    // The operands are reversed to match the order of MultiPush/Pop.
    Push(zero_reg, zero_reg, t2, t1);
  }

  // Link the current handler as the next handler.
  li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  lw(t1, MemOperand(t2));
  push(t1);
  // Set this new handler as the current one.
  sw(sp, MemOperand(t2));
}
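
// After PushTryHandler the handler frame on the stack is, lowest address
// first (kPointerSize == 4 on MIPS32, matching the asserts above):
//   sp + 0:  next handler address
//   sp + 4:  code object
//   sp + 8:  state (handler kind and handler-table index)
//   sp + 12: context (zero for the JS entry handler)
//   sp + 16: frame pointer (zero for the JS entry handler)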


void MacroAssembler::PopTryHandler() {
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  pop(a1);
  Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
  li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  sw(a1, MemOperand(at));
}


void MacroAssembler::JumpToHandlerEntry() {
  // Compute the handler entry address and jump to it. The handler table is
  // a fixed array of (smi-tagged) code offsets.
  // v0 = exception, a1 = code object, a2 = state.
  lw(a3, FieldMemOperand(a1, Code::kHandlerTableOffset));  // Handler table.
  Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  srl(a2, a2, StackHandler::kKindWidth);  // Handler index.
  sll(a2, a2, kPointerSizeLog2);
  Addu(a2, a3, a2);
  lw(a2, MemOperand(a2));  // Smi-tagged offset.
  Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start.
  sra(t9, a2, kSmiTagSize);
  Addu(t9, t9, a1);
  Jump(t9);  // Jump.
}
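
// In effect the sequence above computes
//   entry = code_start + Smi::value(handler_table[state >> kKindWidth])
// and jumps there.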


void MacroAssembler::Throw(Register value) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // The exception is expected in v0.
  Move(v0, value);

  // Drop the stack pointer to the top of the top handler.
  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
                                   isolate())));
  lw(sp, MemOperand(a3));

  // Restore the next handler.
  pop(a2);
  sw(a2, MemOperand(a3));

  // Get the code object (a1) and state (a2). Restore the context and frame
  // pointer.
  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());

  // If the handler is a JS frame, restore the context to the frame.
  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
  // or cp.
  Label done;
  Branch(&done, eq, cp, Operand(zero_reg));
  sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  bind(&done);

  JumpToHandlerEntry();
}


void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
                                      Register value) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // The exception is expected in v0.
  if (type == OUT_OF_MEMORY) {
    // Set external caught exception to false.
    ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
                                      isolate());
    li(a0, Operand(false, RelocInfo::NONE));
    li(a2, Operand(external_caught));
    sw(a0, MemOperand(a2));

    // Set pending exception and v0 to out of memory exception.
    Failure* out_of_memory = Failure::OutOfMemoryException();
    li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
    li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                     isolate())));
    sw(v0, MemOperand(a2));
  } else if (!value.is(v0)) {
    mov(v0, value);
  }

  // Drop the stack pointer to the top of the top stack handler.
  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  lw(sp, MemOperand(a3));

  // Unwind the handlers until the ENTRY handler is found.
  Label fetch_next, check_kind;
  jmp(&check_kind);
  bind(&fetch_next);
  lw(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));

  bind(&check_kind);
  STATIC_ASSERT(StackHandler::ENTRY == 0);
  lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
  And(a2, a2, Operand(StackHandler::KindField::kMask));
  Branch(&fetch_next, ne, a2, Operand(zero_reg));

  // Set the top handler address to next handler past the top ENTRY handler.
  pop(a2);
  sw(a2, MemOperand(a3));

  // Get the code object (a1) and state (a2). Clear the context and frame
  // pointer (0 was saved in the handler).
  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());

  JumpToHandlerEntry();
}


void MacroAssembler::AllocateInNewSpace(int object_size,
                                        Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Label* gc_required,
                                        AllocationFlags flags) {
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      li(result, 0x7091);
      li(scratch1, 0x7191);
      li(scratch2, 0x7291);
    }
    jmp(gc_required);
    return;
  }

  ASSERT(!result.is(scratch1));
  ASSERT(!result.is(scratch2));
  ASSERT(!scratch1.is(scratch2));
  ASSERT(!scratch1.is(t9));
  ASSERT(!scratch2.is(t9));
  ASSERT(!result.is(t9));

  // Make object size into bytes.
  if ((flags & SIZE_IN_WORDS) != 0) {
    object_size *= kPointerSize;
  }
  ASSERT_EQ(0, object_size & kObjectAlignmentMask);

  // Check relative positions of allocation top and limit addresses.
  // ARM adds additional checks to make sure the ldm instruction can be
  // used. On MIPS we don't have ldm so we don't need additional checks either.
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address(isolate());
  intptr_t top =
      reinterpret_cast<intptr_t>(new_space_allocation_top.address());
  intptr_t limit =
      reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
  ASSERT((limit - top) == kPointerSize);

  // Set up allocation top address and object size registers.
  Register topaddr = scratch1;
  Register obj_size_reg = scratch2;
  li(topaddr, Operand(new_space_allocation_top));
  li(obj_size_reg, Operand(object_size));

  // This code stores a temporary value in t9.
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    // Load allocation top into result and allocation limit into t9.
    lw(result, MemOperand(topaddr));
    lw(t9, MemOperand(topaddr, kPointerSize));
  } else {
    if (emit_debug_code()) {
      // Assert that result actually contains top on entry. t9 is used
      // immediately below, so this use of t9 does not make the register
      // contents differ between debug and release mode.
      lw(t9, MemOperand(topaddr));
      Check(eq, "Unexpected allocation top", result, Operand(t9));
    }
    // Load allocation limit into t9. Result already contains allocation top.
    lw(t9, MemOperand(topaddr, limit - top));
  }

  // Calculate new top and bail out if new space is exhausted. Use result
  // to calculate the new top.
  Addu(scratch2, result, Operand(obj_size_reg));
  Branch(gc_required, Ugreater, scratch2, Operand(t9));
  sw(scratch2, MemOperand(topaddr));

  // Tag object if requested.
  if ((flags & TAG_OBJECT) != 0) {
    Addu(result, result, Operand(kHeapObjectTag));
  }
}
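
// Typical use, as in AllocateHeapNumber further below:
//   AllocateInNewSpace(HeapNumber::kSize, result, scratch1, scratch2,
//                      need_gc, TAG_OBJECT);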


void MacroAssembler::AllocateInNewSpace(Register object_size,
                                        Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Label* gc_required,
                                        AllocationFlags flags) {
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      li(result, 0x7091);
      li(scratch1, 0x7191);
      li(scratch2, 0x7291);
    }
    jmp(gc_required);
    return;
  }

  ASSERT(!result.is(scratch1));
  ASSERT(!result.is(scratch2));
  ASSERT(!scratch1.is(scratch2));
  ASSERT(!object_size.is(t9));
  ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));

  // Check relative positions of allocation top and limit addresses.
  // ARM adds additional checks to make sure the ldm instruction can be
  // used. On MIPS we don't have ldm so we don't need additional checks either.
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address(isolate());
  intptr_t top =
      reinterpret_cast<intptr_t>(new_space_allocation_top.address());
  intptr_t limit =
      reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
  ASSERT((limit - top) == kPointerSize);

  // Set up allocation top address and object size registers.
  Register topaddr = scratch1;
  li(topaddr, Operand(new_space_allocation_top));

  // This code stores a temporary value in t9.
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    // Load allocation top into result and allocation limit into t9.
    lw(result, MemOperand(topaddr));
    lw(t9, MemOperand(topaddr, kPointerSize));
  } else {
    if (emit_debug_code()) {
      // Assert that result actually contains top on entry. t9 is used
      // immediately below, so this use of t9 does not make the register
      // contents differ between debug and release mode.
      lw(t9, MemOperand(topaddr));
      Check(eq, "Unexpected allocation top", result, Operand(t9));
    }
    // Load allocation limit into t9. Result already contains allocation top.
    lw(t9, MemOperand(topaddr, limit - top));
  }

  // Calculate new top and bail out if new space is exhausted. Use result
  // to calculate the new top. Object size may be in words so a shift is
  // required to get the number of bytes.
  if ((flags & SIZE_IN_WORDS) != 0) {
    sll(scratch2, object_size, kPointerSizeLog2);
    Addu(scratch2, result, scratch2);
  } else {
    Addu(scratch2, result, Operand(object_size));
  }
  Branch(gc_required, Ugreater, scratch2, Operand(t9));

  // Update allocation top. result temporarily holds the new top.
  if (emit_debug_code()) {
    And(t9, scratch2, Operand(kObjectAlignmentMask));
    Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
  }
  sw(scratch2, MemOperand(topaddr));

  // Tag object if requested.
  if ((flags & TAG_OBJECT) != 0) {
    Addu(result, result, Operand(kHeapObjectTag));
  }
}


void MacroAssembler::UndoAllocationInNewSpace(Register object,
                                              Register scratch) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  // Make sure the object has no tag before resetting top.
  And(object, object, Operand(~kHeapObjectTagMask));
#ifdef DEBUG
  // Check that the object un-allocated is below the current top.
  li(scratch, Operand(new_space_allocation_top));
  lw(scratch, MemOperand(scratch));
  Check(less, "Undo allocation of non allocated memory",
        object, Operand(scratch));
#endif
  // Write the address of the object to un-allocate as the current top.
  li(scratch, Operand(new_space_allocation_top));
  sw(object, MemOperand(scratch));
}


void MacroAssembler::AllocateTwoByteString(Register result,
                                           Register length,
                                           Register scratch1,
                                           Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  sll(scratch1, length, 1);  // Length in bytes, not chars.
  addiu(scratch1, scratch1,
        kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));

  // Allocate two-byte string in new space.
  AllocateInNewSpace(scratch1,
                     result,
                     scratch2,
                     scratch3,
                     gc_required,
                     TAG_OBJECT);

  // Set the map, length and hash field.
  InitializeNewString(result,
                      length,
                      Heap::kStringMapRootIndex,
                      scratch1,
                      scratch2);
}


void MacroAssembler::AllocateAsciiString(Register result,
                                         Register length,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
                                         Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
  ASSERT(kCharSize == 1);
  addiu(scratch1, length, kObjectAlignmentMask + SeqAsciiString::kHeaderSize);
  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));

  // Allocate ASCII string in new space.
  AllocateInNewSpace(scratch1,
                     result,
                     scratch2,
                     scratch3,
                     gc_required,
                     TAG_OBJECT);

  // Set the map, length and hash field.
  InitializeNewString(result,
                      length,
                      Heap::kAsciiStringMapRootIndex,
                      scratch1,
                      scratch2);
}


void MacroAssembler::AllocateTwoByteConsString(Register result,
                                               Register length,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  AllocateInNewSpace(ConsString::kSize,
                     result,
                     scratch1,
                     scratch2,
                     gc_required,
                     TAG_OBJECT);
  InitializeNewString(result,
                      length,
                      Heap::kConsStringMapRootIndex,
                      scratch1,
                      scratch2);
}


void MacroAssembler::AllocateAsciiConsString(Register result,
                                             Register length,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* gc_required) {
  AllocateInNewSpace(ConsString::kSize,
                     result,
                     scratch1,
                     scratch2,
                     gc_required,
                     TAG_OBJECT);
  InitializeNewString(result,
                      length,
                      Heap::kConsAsciiStringMapRootIndex,
                      scratch1,
                      scratch2);
}


void MacroAssembler::AllocateTwoByteSlicedString(Register result,
                                                 Register length,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  AllocateInNewSpace(SlicedString::kSize,
                     result,
                     scratch1,
                     scratch2,
                     gc_required,
                     TAG_OBJECT);

  InitializeNewString(result,
                      length,
                      Heap::kSlicedStringMapRootIndex,
                      scratch1,
                      scratch2);
}


void MacroAssembler::AllocateAsciiSlicedString(Register result,
                                               Register length,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  AllocateInNewSpace(SlicedString::kSize,
                     result,
                     scratch1,
                     scratch2,
                     gc_required,
                     TAG_OBJECT);

  InitializeNewString(result,
                      length,
                      Heap::kSlicedAsciiStringMapRootIndex,
                      scratch1,
                      scratch2);
}


// Allocates a heap number or jumps to the label if the young space is full
// and a scavenge is needed.
void MacroAssembler::AllocateHeapNumber(Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Register heap_number_map,
                                        Label* need_gc) {
  // Allocate an object in the heap for the heap number and tag it as a heap
  // object.
  AllocateInNewSpace(HeapNumber::kSize,
                     result,
                     scratch1,
                     scratch2,
                     need_gc,
                     TAG_OBJECT);

  // Store heap number map in the allocated object.
  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
}


void MacroAssembler::AllocateHeapNumberWithValue(Register result,
                                                 FPURegister value,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
  AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
  sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
}


// Copies a fixed number of fields of heap objects from src to dst.
void MacroAssembler::CopyFields(Register dst,
                                Register src,
                                RegList temps,
                                int field_count) {
  ASSERT((temps & dst.bit()) == 0);
  ASSERT((temps & src.bit()) == 0);
  // Primitive implementation using only one temporary register.

  Register tmp = no_reg;
  // Find a temp register in temps list.
  for (int i = 0; i < kNumRegisters; i++) {
    if ((temps & (1 << i)) != 0) {
      tmp.code_ = i;
      break;
    }
  }
  ASSERT(!tmp.is(no_reg));

  for (int i = 0; i < field_count; i++) {
    lw(tmp, FieldMemOperand(src, i * kPointerSize));
    sw(tmp, FieldMemOperand(dst, i * kPointerSize));
  }
}
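
// Illustrative call (register choice hypothetical): copy the first three
// fields of a heap object using t5 as the scratch register,
//   CopyFields(dst, src, t5.bit(), 3);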


void MacroAssembler::CopyBytes(Register src,
                               Register dst,
                               Register length,
                               Register scratch) {
  Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;

  // Align src before copying in word size chunks.
  bind(&align_loop);
  Branch(&done, eq, length, Operand(zero_reg));
  bind(&align_loop_1);
  And(scratch, src, kPointerSize - 1);
  Branch(&word_loop, eq, scratch, Operand(zero_reg));
  lbu(scratch, MemOperand(src));
  Addu(src, src, 1);
  sb(scratch, MemOperand(dst));
  Addu(dst, dst, 1);
  Subu(length, length, Operand(1));
  Branch(&byte_loop_1, ne, length, Operand(zero_reg));

  // Copy bytes in word size chunks.
  bind(&word_loop);
  if (emit_debug_code()) {
    And(scratch, src, kPointerSize - 1);
    Assert(eq, "Expecting alignment for CopyBytes",
           scratch, Operand(zero_reg));
  }
  Branch(&byte_loop, lt, length, Operand(kPointerSize));
  lw(scratch, MemOperand(src));
  Addu(src, src, kPointerSize);

  // TODO(kalmard) check if this can be optimized to use sw in most cases.
  // Can't use unaligned access - copy byte by byte.
  sb(scratch, MemOperand(dst, 0));
  srl(scratch, scratch, 8);
  sb(scratch, MemOperand(dst, 1));
  srl(scratch, scratch, 8);
  sb(scratch, MemOperand(dst, 2));
  srl(scratch, scratch, 8);
  sb(scratch, MemOperand(dst, 3));
  Addu(dst, dst, 4);

  Subu(length, length, Operand(kPointerSize));
  Branch(&word_loop);

  // Copy the last bytes if any left.
  bind(&byte_loop);
  Branch(&done, eq, length, Operand(zero_reg));
  bind(&byte_loop_1);
  lbu(scratch, MemOperand(src));
  Addu(src, src, 1);
  sb(scratch, MemOperand(dst));
  Addu(dst, dst, 1);
  Subu(length, length, Operand(1));
  Branch(&byte_loop_1, ne, length, Operand(zero_reg));
  bind(&done);
}


void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
                                                Register end_offset,
                                                Register filler) {
  Label loop, entry;
  Branch(&entry);
  bind(&loop);
  sw(filler, MemOperand(start_offset));
  Addu(start_offset, start_offset, kPointerSize);
  bind(&entry);
  Branch(&loop, lt, start_offset, Operand(end_offset));
}
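
// A minimal usage sketch (registers and addresses are illustrative): fill an
// object's fields with the undefined value,
//   LoadRoot(t7, Heap::kUndefinedValueRootIndex);
//   InitializeFieldsWithFiller(first_field_addr, past_last_field_addr, t7);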


void MacroAssembler::CheckFastElements(Register map,
                                       Register scratch,
                                       Label* fail) {
  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
  STATIC_ASSERT(FAST_ELEMENTS == 1);
  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue));
}


void MacroAssembler::CheckFastObjectElements(Register map,
                                             Register scratch,
                                             Label* fail) {
  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
  STATIC_ASSERT(FAST_ELEMENTS == 1);
  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  Branch(fail, ls, scratch,
         Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
  Branch(fail, hi, scratch,
         Operand(Map::kMaximumBitField2FastElementValue));
}


void MacroAssembler::CheckFastSmiOnlyElements(Register map,
                                              Register scratch,
                                              Label* fail) {
  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  Branch(fail, hi, scratch,
         Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
}
3195
3196
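// The three checks above classify an elements kind by comparing one byte of
// the map (bit_field2) against two boundary constants. A host-side sketch
// (illustrative) of the same classification:
static bool IsFastSmiOnlyElementsByte(uint32_t bit_field2) {
  return bit_field2 <= Map::kMaximumBitField2FastSmiOnlyElementValue;
}
static bool IsFastObjectElementsByte(uint32_t bit_field2) {
  return bit_field2 > Map::kMaximumBitField2FastSmiOnlyElementValue &&
         bit_field2 <= Map::kMaximumBitField2FastElementValue;
}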
3197void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3198 Register key_reg,
3199 Register receiver_reg,
3200 Register elements_reg,
3201 Register scratch1,
3202 Register scratch2,
3203 Register scratch3,
3204 Register scratch4,
3205 Label* fail) {
3206 Label smi_value, maybe_nan, have_double_value, is_nan, done;
3207 Register mantissa_reg = scratch2;
3208 Register exponent_reg = scratch3;
3209
3210 // Handle smi values specially.
3211 JumpIfSmi(value_reg, &smi_value);
3212
3213  // Ensure that the object is a heap number.
3214 CheckMap(value_reg,
3215 scratch1,
3216 isolate()->factory()->heap_number_map(),
3217 fail,
3218 DONT_DO_SMI_CHECK);
3219
3220  // Check for NaN: NaN and Infinity values have an upper word greater than
3221  // or equal (signed) to 0x7ff00000, i.e. all exponent bits set.
3222 li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
3223 lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
3224 Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
3225
3226 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3227
3228 bind(&have_double_value);
3229 sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3230 Addu(scratch1, scratch1, elements_reg);
3231 sw(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
3232 uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
3233 sw(exponent_reg, FieldMemOperand(scratch1, offset));
3234 jmp(&done);
3235
3236 bind(&maybe_nan);
3237  // Could be NaN or Infinity. If fraction is not zero, it's NaN; otherwise
3238 // it's an Infinity, and the non-NaN code path applies.
3239 Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
3240 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3241 Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
3242 bind(&is_nan);
3243 // Load canonical NaN for storing into the double array.
3244 uint64_t nan_int64 = BitCast<uint64_t>(
3245 FixedDoubleArray::canonical_not_the_hole_nan_as_double());
3246 li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
3247 li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
3248 jmp(&have_double_value);
3249
3250 bind(&smi_value);
3251 Addu(scratch1, elements_reg,
3252 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
3253 sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3254 Addu(scratch1, scratch1, scratch2);
3255  // scratch1 is now the effective address of the double element.
3256
3257 FloatingPointHelper::Destination destination;
3258 if (CpuFeatures::IsSupported(FPU)) {
3259 destination = FloatingPointHelper::kFPURegisters;
3260 } else {
3261 destination = FloatingPointHelper::kCoreRegisters;
3262 }
3263
3264 Register untagged_value = receiver_reg;
3265 SmiUntag(untagged_value, value_reg);
3266 FloatingPointHelper::ConvertIntToDouble(this,
3267 untagged_value,
3268 destination,
3269 f0,
3270 mantissa_reg,
3271 exponent_reg,
3272 scratch4,
3273 f2);
3274 if (destination == FloatingPointHelper::kFPURegisters) {
3275 CpuFeatures::Scope scope(FPU);
3276 sdc1(f0, MemOperand(scratch1, 0));
3277 } else {
3278 sw(mantissa_reg, MemOperand(scratch1, 0));
3279 sw(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
3280 }
3281 bind(&done);
3282}
3283
3284
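// A worked sketch (illustrative) of the key scaling used twice above: the
// key arrives smi-tagged (index << kSmiTagSize), so shifting by
// kDoubleSizeLog2 - kSmiTagSize turns it into index * kDoubleSize directly.
static int32_t DoubleElementByteOffset(int32_t smi_key) {
  // With kSmiTagSize == 1 and kDoubleSizeLog2 == 3: (index * 2) * 4 == index * 8.
  return smi_key << (kDoubleSizeLog2 - kSmiTagSize);
}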
Steve Block44f0eee2011-05-26 01:26:41 +01003285void MacroAssembler::CheckMap(Register obj,
3286 Register scratch,
3287 Handle<Map> map,
3288 Label* fail,
Ben Murdoch257744e2011-11-30 15:57:28 +00003289 SmiCheckType smi_check_type) {
3290 if (smi_check_type == DO_SMI_CHECK) {
Steve Block44f0eee2011-05-26 01:26:41 +01003291 JumpIfSmi(obj, fail);
3292 }
3293 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3294 li(at, Operand(map));
3295 Branch(fail, ne, scratch, Operand(at));
3296}
3297
3298
Ben Murdoch257744e2011-11-30 15:57:28 +00003299void MacroAssembler::DispatchMap(Register obj,
3300 Register scratch,
3301 Handle<Map> map,
3302 Handle<Code> success,
3303 SmiCheckType smi_check_type) {
3304 Label fail;
3305 if (smi_check_type == DO_SMI_CHECK) {
3306 JumpIfSmi(obj, &fail);
3307 }
3308 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3309 Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
3310 bind(&fail);
3311}
3312
3313
Steve Block44f0eee2011-05-26 01:26:41 +01003314void MacroAssembler::CheckMap(Register obj,
3315 Register scratch,
3316 Heap::RootListIndex index,
3317 Label* fail,
Ben Murdoch257744e2011-11-30 15:57:28 +00003318 SmiCheckType smi_check_type) {
3319 if (smi_check_type == DO_SMI_CHECK) {
Steve Block44f0eee2011-05-26 01:26:41 +01003320 JumpIfSmi(obj, fail);
3321 }
3322 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3323 LoadRoot(at, index);
3324 Branch(fail, ne, scratch, Operand(at));
Steve Block6ded16b2010-05-10 14:33:55 +01003325}
3326
3327
Ben Murdoch257744e2011-11-30 15:57:28 +00003328void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
3329 CpuFeatures::Scope scope(FPU);
3330 if (IsMipsSoftFloatABI) {
3331 Move(dst, v0, v1);
3332 } else {
3333 Move(dst, f0); // Reg f0 is o32 ABI FP return value.
3334 }
3335}
3336
3337
3338void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
3339 CpuFeatures::Scope scope(FPU);
3340 if (!IsMipsSoftFloatABI) {
3341 Move(f12, dreg);
3342 } else {
3343 Move(a0, a1, dreg);
3344 }
3345}
3346
3347
3348void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
3349 DoubleRegister dreg2) {
3350 CpuFeatures::Scope scope(FPU);
3351 if (!IsMipsSoftFloatABI) {
3352 if (dreg2.is(f12)) {
3353 ASSERT(!dreg1.is(f14));
3354 Move(f14, dreg2);
3355 Move(f12, dreg1);
3356 } else {
3357 Move(f12, dreg1);
3358 Move(f14, dreg2);
3359 }
3360 } else {
3361 Move(a0, a1, dreg1);
3362 Move(a2, a3, dreg2);
3363 }
3364}
3365
3366
3367void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
3368 Register reg) {
3369 CpuFeatures::Scope scope(FPU);
3370 if (!IsMipsSoftFloatABI) {
3371 Move(f12, dreg);
3372 Move(a2, reg);
3373 } else {
3374 Move(a2, reg);
3375 Move(a0, a1, dreg);
3376 }
3377}
3378
3379
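// Under the soft-float o32 ABI a double argument travels in a register pair
// (a0/a1 or a2/a3). A host-side sketch (illustrative; the word order is an
// assumption matching the little-endian split done by Move(reg, reg, dreg)):
static void PackDoubleForSoftFloatCall(double value,
                                       uint32_t* lo, uint32_t* hi) {
  uint64_t bits = BitCast<uint64_t>(value);
  *lo = static_cast<uint32_t>(bits);        // Low word, e.g. a0.
  *hi = static_cast<uint32_t>(bits >> 32);  // High word, e.g. a1.
}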
3380void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
3381 // This macro takes the dst register to make the code more readable
3382 // at the call sites. However, the dst register has to be t1 to
3383 // follow the calling convention which requires the call type to be
3384 // in t1.
3385 ASSERT(dst.is(t1));
3386 if (call_kind == CALL_AS_FUNCTION) {
3387 li(dst, Operand(Smi::FromInt(1)));
3388 } else {
3389 li(dst, Operand(Smi::FromInt(0)));
3390 }
3391}
3392
3393
Steve Block6ded16b2010-05-10 14:33:55 +01003394// -----------------------------------------------------------------------------
Ben Murdoch257744e2011-11-30 15:57:28 +00003395// JavaScript invokes.
Steve Block6ded16b2010-05-10 14:33:55 +01003396
3397void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3398 const ParameterCount& actual,
3399 Handle<Code> code_constant,
3400 Register code_reg,
3401 Label* done,
Steve Block44f0eee2011-05-26 01:26:41 +01003402 InvokeFlag flag,
Ben Murdoch257744e2011-11-30 15:57:28 +00003403 const CallWrapper& call_wrapper,
3404 CallKind call_kind) {
Steve Block6ded16b2010-05-10 14:33:55 +01003405 bool definitely_matches = false;
3406 Label regular_invoke;
3407
3408  // Check whether the expected and actual argument counts match. If not,
3409  // set up registers according to the contract with ArgumentsAdaptorTrampoline:
3410 // a0: actual arguments count
3411 // a1: function (passed through to callee)
3412 // a2: expected arguments count
3413 // a3: callee code entry
3414
3415 // The code below is made a lot easier because the calling code already sets
3416 // up actual and expected registers according to the contract if values are
3417 // passed in registers.
3418 ASSERT(actual.is_immediate() || actual.reg().is(a0));
3419 ASSERT(expected.is_immediate() || expected.reg().is(a2));
3420 ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
3421
3422 if (expected.is_immediate()) {
3423 ASSERT(actual.is_immediate());
3424 if (expected.immediate() == actual.immediate()) {
3425 definitely_matches = true;
3426 } else {
3427 li(a0, Operand(actual.immediate()));
3428 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3429 if (expected.immediate() == sentinel) {
3430 // Don't worry about adapting arguments for builtins that
3431          // don't want that done. Skip adaptation code by making it look
3432 // like we have a match between expected and actual number of
3433 // arguments.
3434 definitely_matches = true;
3435 } else {
3436 li(a2, Operand(expected.immediate()));
3437 }
3438 }
Ben Murdoch257744e2011-11-30 15:57:28 +00003439 } else if (actual.is_immediate()) {
3440 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
3441 li(a0, Operand(actual.immediate()));
Steve Block6ded16b2010-05-10 14:33:55 +01003442 } else {
Ben Murdoch257744e2011-11-30 15:57:28 +00003443 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
Steve Block6ded16b2010-05-10 14:33:55 +01003444 }
3445
3446 if (!definitely_matches) {
3447 if (!code_constant.is_null()) {
3448 li(a3, Operand(code_constant));
3449 addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
3450 }
3451
Steve Block44f0eee2011-05-26 01:26:41 +01003452 Handle<Code> adaptor =
3453 isolate()->builtins()->ArgumentsAdaptorTrampoline();
Steve Block6ded16b2010-05-10 14:33:55 +01003454 if (flag == CALL_FUNCTION) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003455 call_wrapper.BeforeCall(CallSize(adaptor));
Ben Murdoch257744e2011-11-30 15:57:28 +00003456 SetCallKind(t1, call_kind);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003457 Call(adaptor);
Ben Murdoch257744e2011-11-30 15:57:28 +00003458 call_wrapper.AfterCall();
Steve Block44f0eee2011-05-26 01:26:41 +01003459 jmp(done);
Steve Block6ded16b2010-05-10 14:33:55 +01003460 } else {
Ben Murdoch257744e2011-11-30 15:57:28 +00003461 SetCallKind(t1, call_kind);
Steve Block44f0eee2011-05-26 01:26:41 +01003462 Jump(adaptor, RelocInfo::CODE_TARGET);
Steve Block6ded16b2010-05-10 14:33:55 +01003463 }
3464 bind(&regular_invoke);
3465 }
3466}
3467
Steve Block44f0eee2011-05-26 01:26:41 +01003468
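// A host-side sketch (illustrative) of the immediate/immediate decision made
// in InvokePrologue above: the call proceeds without the arguments adaptor
// when the counts agree or the callee opted out of argument adaptation.
static bool DefinitelyMatches(int expected, int actual) {
  return expected == actual ||
         expected == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
}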
Steve Block6ded16b2010-05-10 14:33:55 +01003469void MacroAssembler::InvokeCode(Register code,
3470 const ParameterCount& expected,
3471 const ParameterCount& actual,
Steve Block44f0eee2011-05-26 01:26:41 +01003472 InvokeFlag flag,
Ben Murdoch257744e2011-11-30 15:57:28 +00003473 const CallWrapper& call_wrapper,
3474 CallKind call_kind) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003475 // You can't call a function without a valid frame.
3476 ASSERT(flag == JUMP_FUNCTION || has_frame());
3477
Steve Block6ded16b2010-05-10 14:33:55 +01003478 Label done;
3479
Steve Block44f0eee2011-05-26 01:26:41 +01003480 InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
Ben Murdoch257744e2011-11-30 15:57:28 +00003481 call_wrapper, call_kind);
Steve Block6ded16b2010-05-10 14:33:55 +01003482 if (flag == CALL_FUNCTION) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003483 call_wrapper.BeforeCall(CallSize(code));
Ben Murdoch257744e2011-11-30 15:57:28 +00003484 SetCallKind(t1, call_kind);
Steve Block6ded16b2010-05-10 14:33:55 +01003485 Call(code);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003486 call_wrapper.AfterCall();
Steve Block6ded16b2010-05-10 14:33:55 +01003487 } else {
3488 ASSERT(flag == JUMP_FUNCTION);
Ben Murdoch257744e2011-11-30 15:57:28 +00003489 SetCallKind(t1, call_kind);
Steve Block6ded16b2010-05-10 14:33:55 +01003490 Jump(code);
3491 }
3492  // Continue here if InvokePrologue handled the invocation itself due to
3493  // mismatched parameter counts.
3494 bind(&done);
3495}
3496
3497
3498void MacroAssembler::InvokeCode(Handle<Code> code,
3499 const ParameterCount& expected,
3500 const ParameterCount& actual,
3501 RelocInfo::Mode rmode,
Ben Murdoch257744e2011-11-30 15:57:28 +00003502 InvokeFlag flag,
3503 CallKind call_kind) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003504 // You can't call a function without a valid frame.
3505 ASSERT(flag == JUMP_FUNCTION || has_frame());
3506
Steve Block6ded16b2010-05-10 14:33:55 +01003507 Label done;
3508
Ben Murdoch257744e2011-11-30 15:57:28 +00003509 InvokePrologue(expected, actual, code, no_reg, &done, flag,
3510 NullCallWrapper(), call_kind);
Steve Block6ded16b2010-05-10 14:33:55 +01003511 if (flag == CALL_FUNCTION) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003512 SetCallKind(t1, call_kind);
Steve Block6ded16b2010-05-10 14:33:55 +01003513 Call(code, rmode);
3514 } else {
Ben Murdoch257744e2011-11-30 15:57:28 +00003515 SetCallKind(t1, call_kind);
Steve Block6ded16b2010-05-10 14:33:55 +01003516 Jump(code, rmode);
3517 }
3518  // Continue here if InvokePrologue handled the invocation itself due to
3519  // mismatched parameter counts.
3520 bind(&done);
3521}
3522
3523
3524void MacroAssembler::InvokeFunction(Register function,
3525 const ParameterCount& actual,
Steve Block44f0eee2011-05-26 01:26:41 +01003526 InvokeFlag flag,
Ben Murdoch257744e2011-11-30 15:57:28 +00003527 const CallWrapper& call_wrapper,
3528 CallKind call_kind) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003529 // You can't call a function without a valid frame.
3530 ASSERT(flag == JUMP_FUNCTION || has_frame());
3531
Steve Block6ded16b2010-05-10 14:33:55 +01003532 // Contract with called JS functions requires that function is passed in a1.
3533 ASSERT(function.is(a1));
3534 Register expected_reg = a2;
3535 Register code_reg = a3;
3536
3537 lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3538 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3539 lw(expected_reg,
3540 FieldMemOperand(code_reg,
3541 SharedFunctionInfo::kFormalParameterCountOffset));
Steve Block44f0eee2011-05-26 01:26:41 +01003542 sra(expected_reg, expected_reg, kSmiTagSize);
3543 lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
Steve Block6ded16b2010-05-10 14:33:55 +01003544
3545 ParameterCount expected(expected_reg);
Ben Murdoch257744e2011-11-30 15:57:28 +00003546 InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
Steve Block44f0eee2011-05-26 01:26:41 +01003547}
3548
3549
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003550void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
Steve Block44f0eee2011-05-26 01:26:41 +01003551 const ParameterCount& actual,
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003552 InvokeFlag flag,
3553 CallKind call_kind) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003554 // You can't call a function without a valid frame.
3555 ASSERT(flag == JUMP_FUNCTION || has_frame());
Steve Block44f0eee2011-05-26 01:26:41 +01003556
3557  // Get the function and set up the context.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003558 li(a1, Operand(function));
Steve Block44f0eee2011-05-26 01:26:41 +01003559 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3560
Steve Block44f0eee2011-05-26 01:26:41 +01003561 ParameterCount expected(function->shared()->formal_parameter_count());
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003562 // We call indirectly through the code field in the function to
3563 // allow recompilation to take effect without changing any of the
3564 // call sites.
3565 lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3566 InvokeCode(a3, expected, actual, flag, NullCallWrapper(), call_kind);
Steve Block44f0eee2011-05-26 01:26:41 +01003567}
3568
3569
3570void MacroAssembler::IsObjectJSObjectType(Register heap_object,
3571 Register map,
3572 Register scratch,
3573 Label* fail) {
3574 lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
3575 IsInstanceJSObjectType(map, scratch, fail);
3576}
3577
3578
3579void MacroAssembler::IsInstanceJSObjectType(Register map,
3580 Register scratch,
3581 Label* fail) {
3582 lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003583 Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
3584 Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
Steve Block44f0eee2011-05-26 01:26:41 +01003585}
3586
3587
3588void MacroAssembler::IsObjectJSStringType(Register object,
3589 Register scratch,
3590 Label* fail) {
3591 ASSERT(kNotStringTag != 0);
3592
3593 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3594 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3595 And(scratch, scratch, Operand(kIsNotStringMask));
3596 Branch(fail, ne, scratch, Operand(zero_reg));
Steve Block6ded16b2010-05-10 14:33:55 +01003597}
3598
3599
3600// ---------------------------------------------------------------------------
3601// Support functions.
3602
Steve Block44f0eee2011-05-26 01:26:41 +01003603
3604void MacroAssembler::TryGetFunctionPrototype(Register function,
3605 Register result,
3606 Register scratch,
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003607 Label* miss,
3608 bool miss_on_bound_function) {
Steve Block44f0eee2011-05-26 01:26:41 +01003609 // Check that the receiver isn't a smi.
3610 JumpIfSmi(function, miss);
3611
3612 // Check that the function really is a function. Load map into result reg.
3613 GetObjectType(function, result, scratch);
3614 Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
3615
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003616 if (miss_on_bound_function) {
3617 lw(scratch,
3618 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3619 lw(scratch,
3620 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3621 And(scratch, scratch,
3622 Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
3623 Branch(miss, ne, scratch, Operand(zero_reg));
3624 }
3625
Steve Block44f0eee2011-05-26 01:26:41 +01003626 // Make sure that the function has an instance prototype.
3627 Label non_instance;
3628 lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
3629 And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
3630 Branch(&non_instance, ne, scratch, Operand(zero_reg));
3631
3632 // Get the prototype or initial map from the function.
3633 lw(result,
3634 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3635
3636 // If the prototype or initial map is the hole, don't return it and
3637 // simply miss the cache instead. This will allow us to allocate a
3638 // prototype object on-demand in the runtime system.
3639 LoadRoot(t8, Heap::kTheHoleValueRootIndex);
3640 Branch(miss, eq, result, Operand(t8));
3641
3642 // If the function does not have an initial map, we're done.
3643 Label done;
3644 GetObjectType(result, scratch, scratch);
3645 Branch(&done, ne, scratch, Operand(MAP_TYPE));
3646
3647 // Get the prototype from the initial map.
3648 lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
3649 jmp(&done);
3650
3651 // Non-instance prototype: Fetch prototype from constructor field
3652 // in initial map.
3653 bind(&non_instance);
3654 lw(result, FieldMemOperand(result, Map::kConstructorOffset));
3655
3656 // All done.
3657 bind(&done);
3658}
Steve Block6ded16b2010-05-10 14:33:55 +01003659
3660
Steve Block44f0eee2011-05-26 01:26:41 +01003661void MacroAssembler::GetObjectType(Register object,
3662 Register map,
3663 Register type_reg) {
3664 lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
3665 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3666}
Steve Block6ded16b2010-05-10 14:33:55 +01003667
3668
3669// -----------------------------------------------------------------------------
Ben Murdoch257744e2011-11-30 15:57:28 +00003670// Runtime calls.
Steve Block6ded16b2010-05-10 14:33:55 +01003671
Andrei Popescu31002712010-02-23 13:46:05 +00003672void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
3673 Register r1, const Operand& r2) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003674 ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003675 Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2);
Andrei Popescu31002712010-02-23 13:46:05 +00003676}
3677
3678
Steve Block44f0eee2011-05-26 01:26:41 +01003679void MacroAssembler::TailCallStub(CodeStub* stub) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003680 ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
Steve Block44f0eee2011-05-26 01:26:41 +01003681 Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
Andrei Popescu31002712010-02-23 13:46:05 +00003682}
3683
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003684
Ben Murdoch257744e2011-11-30 15:57:28 +00003685static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
3686 return ref0.address() - ref1.address();
3687}
3688
3689
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003690void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
3691 int stack_space) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003692 ExternalReference next_address =
3693 ExternalReference::handle_scope_next_address();
3694 const int kNextOffset = 0;
3695 const int kLimitOffset = AddressOffset(
3696 ExternalReference::handle_scope_limit_address(),
3697 next_address);
3698 const int kLevelOffset = AddressOffset(
3699 ExternalReference::handle_scope_level_address(),
3700 next_address);
3701
3702 // Allocate HandleScope in callee-save registers.
3703 li(s3, Operand(next_address));
3704 lw(s0, MemOperand(s3, kNextOffset));
3705 lw(s1, MemOperand(s3, kLimitOffset));
3706 lw(s2, MemOperand(s3, kLevelOffset));
3707 Addu(s2, s2, Operand(1));
3708 sw(s2, MemOperand(s3, kLevelOffset));
3709
3710 // The O32 ABI requires us to pass a pointer in a0 where the returned struct
3711 // (4 bytes) will be placed. This is also built into the Simulator.
3712 // Set up the pointer to the returned value (a0). It was allocated in
3713 // EnterExitFrame.
3714 addiu(a0, fp, ExitFrameConstants::kStackSpaceOffset);
3715
3716 // Native call returns to the DirectCEntry stub which redirects to the
3717 // return address pushed on stack (could have moved after GC).
3718 // DirectCEntry stub itself is generated early and never moves.
3719 DirectCEntryStub stub;
3720 stub.GenerateCall(this, function);
3721
3722 // As mentioned above, on MIPS a pointer is returned - we need to dereference
3723 // it to get the actual return value (which is also a pointer).
3724 lw(v0, MemOperand(v0));
3725
3726 Label promote_scheduled_exception;
3727 Label delete_allocated_handles;
3728 Label leave_exit_frame;
3729
3730  // If the result is non-zero, dereference it to get the result value;
3731  // otherwise set it to undefined.
3732 Label skip;
3733 LoadRoot(a0, Heap::kUndefinedValueRootIndex);
3734 Branch(&skip, eq, v0, Operand(zero_reg));
3735 lw(a0, MemOperand(v0));
3736 bind(&skip);
3737 mov(v0, a0);
3738
3739 // No more valid handles (the result handle was the last one). Restore
3740 // previous handle scope.
3741 sw(s0, MemOperand(s3, kNextOffset));
3742 if (emit_debug_code()) {
3743 lw(a1, MemOperand(s3, kLevelOffset));
3744 Check(eq, "Unexpected level after return from api call", a1, Operand(s2));
3745 }
3746 Subu(s2, s2, Operand(1));
3747 sw(s2, MemOperand(s3, kLevelOffset));
3748 lw(at, MemOperand(s3, kLimitOffset));
3749 Branch(&delete_allocated_handles, ne, s1, Operand(at));
3750
3751 // Check if the function scheduled an exception.
3752 bind(&leave_exit_frame);
3753 LoadRoot(t0, Heap::kTheHoleValueRootIndex);
3754 li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
3755 lw(t1, MemOperand(at));
3756 Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
3757 li(s0, Operand(stack_space));
3758 LeaveExitFrame(false, s0);
3759 Ret();
3760
3761 bind(&promote_scheduled_exception);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003762 TailCallExternalReference(
3763 ExternalReference(Runtime::kPromoteScheduledException, isolate()),
3764 0,
3765 1);
Ben Murdoch257744e2011-11-30 15:57:28 +00003766
3767 // HandleScope limit has changed. Delete allocated extensions.
3768 bind(&delete_allocated_handles);
3769 sw(s1, MemOperand(s3, kLimitOffset));
3770 mov(s0, v0);
3771 mov(a0, v0);
3772 PrepareCallCFunction(1, s1);
3773 li(a0, Operand(ExternalReference::isolate_address()));
3774 CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
3775 1);
3776 mov(v0, s0);
3777 jmp(&leave_exit_frame);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003778}
Ben Murdoch257744e2011-11-30 15:57:28 +00003779
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003780
3781bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
3782 if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
3783 return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
Ben Murdoch257744e2011-11-30 15:57:28 +00003784}
3785
Andrei Popescu31002712010-02-23 13:46:05 +00003786
Steve Block6ded16b2010-05-10 14:33:55 +01003787void MacroAssembler::IllegalOperation(int num_arguments) {
3788 if (num_arguments > 0) {
3789 addiu(sp, sp, num_arguments * kPointerSize);
3790 }
3791 LoadRoot(v0, Heap::kUndefinedValueRootIndex);
3792}
3793
3794
Steve Block44f0eee2011-05-26 01:26:41 +01003795void MacroAssembler::IndexFromHash(Register hash,
3796 Register index) {
3797 // If the hash field contains an array index pick it out. The assert checks
3798 // that the constants for the maximum number of digits for an array index
3799 // cached in the hash field and the number of bits reserved for it does not
3800 // conflict.
3801 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
3802 (1 << String::kArrayIndexValueBits));
3803 // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
3804 // the low kHashShift bits.
3805 STATIC_ASSERT(kSmiTag == 0);
3806 Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
3807 sll(index, hash, kSmiTagSize);
3808}
3809
3810
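// A host-side sketch (illustrative) of the extraction above: take
// kArrayIndexValueBits bits starting at kHashShift, then smi-tag the index
// with a single left shift.
static int32_t IndexFromHashReference(uint32_t hash) {
  uint32_t index = (hash >> String::kHashShift) &
                   ((1u << String::kArrayIndexValueBits) - 1);
  return static_cast<int32_t>(index << kSmiTagSize);  // Smi-tagged index.
}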
3811void MacroAssembler::ObjectToDoubleFPURegister(Register object,
3812 FPURegister result,
3813 Register scratch1,
3814 Register scratch2,
3815 Register heap_number_map,
3816 Label* not_number,
3817 ObjectToDoubleFlags flags) {
3818 Label done;
3819 if ((flags & OBJECT_NOT_SMI) == 0) {
3820 Label not_smi;
3821 JumpIfNotSmi(object, &not_smi);
3822 // Remove smi tag and convert to double.
3823 sra(scratch1, object, kSmiTagSize);
3824 mtc1(scratch1, result);
3825 cvt_d_w(result, result);
3826 Branch(&done);
3827 bind(&not_smi);
3828 }
3829 // Check for heap number and load double value from it.
3830 lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
3831 Branch(not_number, ne, scratch1, Operand(heap_number_map));
3832
3833 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
3834 // If exponent is all ones the number is either a NaN or +/-Infinity.
3835 Register exponent = scratch1;
3836 Register mask_reg = scratch2;
3837 lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
3838 li(mask_reg, HeapNumber::kExponentMask);
3839
3840 And(exponent, exponent, mask_reg);
3841 Branch(not_number, eq, exponent, Operand(mask_reg));
3842 }
3843 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
3844 bind(&done);
3845}
3846
3847
Steve Block44f0eee2011-05-26 01:26:41 +01003848void MacroAssembler::SmiToDoubleFPURegister(Register smi,
3849 FPURegister value,
3850 Register scratch1) {
3851 sra(scratch1, smi, kSmiTagSize);
3852 mtc1(scratch1, value);
3853 cvt_d_w(value, value);
3854}
3855
3856
Ben Murdoch257744e2011-11-30 15:57:28 +00003857void MacroAssembler::AdduAndCheckForOverflow(Register dst,
3858 Register left,
3859 Register right,
3860 Register overflow_dst,
3861 Register scratch) {
3862 ASSERT(!dst.is(overflow_dst));
3863 ASSERT(!dst.is(scratch));
3864 ASSERT(!overflow_dst.is(scratch));
3865 ASSERT(!overflow_dst.is(left));
3866 ASSERT(!overflow_dst.is(right));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003867
3868 if (left.is(right) && dst.is(left)) {
3869 ASSERT(!dst.is(t9));
3870 ASSERT(!scratch.is(t9));
3871 ASSERT(!left.is(t9));
3872 ASSERT(!right.is(t9));
3873 ASSERT(!overflow_dst.is(t9));
3874 mov(t9, right);
3875 right = t9;
3876 }
Ben Murdoch257744e2011-11-30 15:57:28 +00003877
Ben Murdoch257744e2011-11-30 15:57:28 +00003878 if (dst.is(left)) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003879 mov(scratch, left); // Preserve left.
3880 addu(dst, left, right); // Left is overwritten.
3881 xor_(scratch, dst, scratch); // Original left.
3882 xor_(overflow_dst, dst, right);
3883 and_(overflow_dst, overflow_dst, scratch);
Ben Murdoch257744e2011-11-30 15:57:28 +00003884 } else if (dst.is(right)) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003885 mov(scratch, right); // Preserve right.
3886 addu(dst, left, right); // Right is overwritten.
3887 xor_(scratch, dst, scratch); // Original right.
3888 xor_(overflow_dst, dst, left);
3889 and_(overflow_dst, overflow_dst, scratch);
Ben Murdoch257744e2011-11-30 15:57:28 +00003890 } else {
3891 addu(dst, left, right);
3892 xor_(overflow_dst, dst, left);
3893 xor_(scratch, dst, right);
3894 and_(overflow_dst, scratch, overflow_dst);
3895 }
3896}
3897
3898
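// The xor/and sequence above is the branch-free signed-overflow predicate:
// addition overflowed iff both operands agree in sign and the sum disagrees
// with them. A host-side sketch (illustrative):
static bool AdditionOverflowed(int32_t left, int32_t right, int32_t sum) {
  // (sum ^ left) & (sum ^ right) has its sign bit set exactly when the
  // sum's sign differs from the signs of both inputs.
  return ((sum ^ left) & (sum ^ right)) < 0;
}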
3899void MacroAssembler::SubuAndCheckForOverflow(Register dst,
3900 Register left,
3901 Register right,
3902 Register overflow_dst,
3903 Register scratch) {
3904 ASSERT(!dst.is(overflow_dst));
3905 ASSERT(!dst.is(scratch));
3906 ASSERT(!overflow_dst.is(scratch));
3907 ASSERT(!overflow_dst.is(left));
3908 ASSERT(!overflow_dst.is(right));
Ben Murdoch257744e2011-11-30 15:57:28 +00003909 ASSERT(!scratch.is(left));
3910 ASSERT(!scratch.is(right));
3911
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003912  // This happens with some Crankshaft code. Since Subu works fine if
3913 // left == right, let's not make that restriction here.
3914 if (left.is(right)) {
3915 mov(dst, zero_reg);
3916 mov(overflow_dst, zero_reg);
3917 return;
3918 }
3919
Ben Murdoch257744e2011-11-30 15:57:28 +00003920 if (dst.is(left)) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003921 mov(scratch, left); // Preserve left.
3922 subu(dst, left, right); // Left is overwritten.
3923 xor_(overflow_dst, dst, scratch); // scratch is original left.
3924    xor_(scratch, scratch, right);  // scratch is original left ^ right.
3925 and_(overflow_dst, scratch, overflow_dst);
Ben Murdoch257744e2011-11-30 15:57:28 +00003926 } else if (dst.is(right)) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003927 mov(scratch, right); // Preserve right.
3928 subu(dst, left, right); // Right is overwritten.
3929 xor_(overflow_dst, dst, left);
3930 xor_(scratch, left, scratch); // Original right.
3931 and_(overflow_dst, scratch, overflow_dst);
Ben Murdoch257744e2011-11-30 15:57:28 +00003932 } else {
3933 subu(dst, left, right);
3934 xor_(overflow_dst, dst, left);
3935 xor_(scratch, left, right);
3936 and_(overflow_dst, scratch, overflow_dst);
3937 }
3938}
3939
3940
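// The matching predicate for subtraction (host-side sketch, illustrative):
// subtraction overflowed iff the operands differ in sign and the difference
// disagrees in sign with the minuend, as computed by the xor/and above.
static bool SubtractionOverflowed(int32_t left, int32_t right, int32_t diff) {
  return ((left ^ right) & (diff ^ left)) < 0;
}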
Steve Block44f0eee2011-05-26 01:26:41 +01003941void MacroAssembler::CallRuntime(const Runtime::Function* f,
3942 int num_arguments) {
Steve Block6ded16b2010-05-10 14:33:55 +01003943 // All parameters are on the stack. v0 has the return value after call.
3944
3945 // If the expected number of arguments of the runtime function is
3946 // constant, we check that the actual number of arguments match the
3947 // expectation.
3948 if (f->nargs >= 0 && f->nargs != num_arguments) {
3949 IllegalOperation(num_arguments);
3950 return;
3951 }
3952
3953 // TODO(1236192): Most runtime routines don't need the number of
3954 // arguments passed in because it is constant. At some point we
3955 // should remove this need and make the runtime routine entry code
3956 // smarter.
3957 li(a0, num_arguments);
Steve Block44f0eee2011-05-26 01:26:41 +01003958 li(a1, Operand(ExternalReference(f, isolate())));
Steve Block6ded16b2010-05-10 14:33:55 +01003959 CEntryStub stub(1);
3960 CallStub(&stub);
Andrei Popescu31002712010-02-23 13:46:05 +00003961}
3962
3963
Steve Block44f0eee2011-05-26 01:26:41 +01003964void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
3965 const Runtime::Function* function = Runtime::FunctionForId(id);
3966 li(a0, Operand(function->nargs));
3967 li(a1, Operand(ExternalReference(function, isolate())));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003968 CEntryStub stub(1, kSaveFPRegs);
Steve Block44f0eee2011-05-26 01:26:41 +01003969 CallStub(&stub);
3970}
3971
3972
Andrei Popescu31002712010-02-23 13:46:05 +00003973void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
Steve Block6ded16b2010-05-10 14:33:55 +01003974 CallRuntime(Runtime::FunctionForId(fid), num_arguments);
3975}
3976
3977
Steve Block44f0eee2011-05-26 01:26:41 +01003978void MacroAssembler::CallExternalReference(const ExternalReference& ext,
3979 int num_arguments) {
3980 li(a0, Operand(num_arguments));
3981 li(a1, Operand(ext));
3982
3983 CEntryStub stub(1);
3984 CallStub(&stub);
3985}
3986
3987
Steve Block6ded16b2010-05-10 14:33:55 +01003988void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
3989 int num_arguments,
3990 int result_size) {
Steve Block44f0eee2011-05-26 01:26:41 +01003991 // TODO(1236192): Most runtime routines don't need the number of
3992 // arguments passed in because it is constant. At some point we
3993 // should remove this need and make the runtime routine entry code
3994 // smarter.
3995 li(a0, Operand(num_arguments));
3996 JumpToExternalReference(ext);
Andrei Popescu31002712010-02-23 13:46:05 +00003997}
3998
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003999
Steve Block6ded16b2010-05-10 14:33:55 +01004000void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
Andrei Popescu31002712010-02-23 13:46:05 +00004001 int num_arguments,
4002 int result_size) {
Steve Block44f0eee2011-05-26 01:26:41 +01004003 TailCallExternalReference(ExternalReference(fid, isolate()),
4004 num_arguments,
4005 result_size);
Andrei Popescu31002712010-02-23 13:46:05 +00004006}
4007
4008
Steve Block6ded16b2010-05-10 14:33:55 +01004009void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
Steve Block44f0eee2011-05-26 01:26:41 +01004010 li(a1, Operand(builtin));
4011 CEntryStub stub(1);
4012 Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
Andrei Popescu31002712010-02-23 13:46:05 +00004013}
4014
4015
4016void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
Ben Murdoch257744e2011-11-30 15:57:28 +00004017 InvokeFlag flag,
4018 const CallWrapper& call_wrapper) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004019 // You can't call a builtin without a valid frame.
4020 ASSERT(flag == JUMP_FUNCTION || has_frame());
4021
Steve Block44f0eee2011-05-26 01:26:41 +01004022 GetBuiltinEntry(t9, id);
Ben Murdoch257744e2011-11-30 15:57:28 +00004023 if (flag == CALL_FUNCTION) {
4024 call_wrapper.BeforeCall(CallSize(t9));
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004025 SetCallKind(t1, CALL_AS_METHOD);
Steve Block44f0eee2011-05-26 01:26:41 +01004026 Call(t9);
Ben Murdoch257744e2011-11-30 15:57:28 +00004027 call_wrapper.AfterCall();
Steve Block44f0eee2011-05-26 01:26:41 +01004028 } else {
Ben Murdoch257744e2011-11-30 15:57:28 +00004029 ASSERT(flag == JUMP_FUNCTION);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004030 SetCallKind(t1, CALL_AS_METHOD);
Steve Block44f0eee2011-05-26 01:26:41 +01004031 Jump(t9);
4032 }
4033}
4034
4035
4036void MacroAssembler::GetBuiltinFunction(Register target,
4037 Builtins::JavaScript id) {
4038 // Load the builtins object into target register.
4039 lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4040 lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
4041 // Load the JavaScript builtin function from the builtins object.
4042 lw(target, FieldMemOperand(target,
4043 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
Andrei Popescu31002712010-02-23 13:46:05 +00004044}
4045
4046
4047void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
Steve Block44f0eee2011-05-26 01:26:41 +01004048 ASSERT(!target.is(a1));
4049 GetBuiltinFunction(a1, id);
4050 // Load the code entry point from the builtins object.
4051 lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
Andrei Popescu31002712010-02-23 13:46:05 +00004052}
4053
4054
4055void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4056 Register scratch1, Register scratch2) {
Steve Block44f0eee2011-05-26 01:26:41 +01004057 if (FLAG_native_code_counters && counter->Enabled()) {
4058 li(scratch1, Operand(value));
4059 li(scratch2, Operand(ExternalReference(counter)));
4060 sw(scratch1, MemOperand(scratch2));
4061 }
Andrei Popescu31002712010-02-23 13:46:05 +00004062}
4063
4064
4065void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4066 Register scratch1, Register scratch2) {
Steve Block44f0eee2011-05-26 01:26:41 +01004067 ASSERT(value > 0);
4068 if (FLAG_native_code_counters && counter->Enabled()) {
4069 li(scratch2, Operand(ExternalReference(counter)));
4070 lw(scratch1, MemOperand(scratch2));
4071 Addu(scratch1, scratch1, Operand(value));
4072 sw(scratch1, MemOperand(scratch2));
4073 }
Andrei Popescu31002712010-02-23 13:46:05 +00004074}
4075
4076
4077void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4078 Register scratch1, Register scratch2) {
Steve Block44f0eee2011-05-26 01:26:41 +01004079 ASSERT(value > 0);
4080 if (FLAG_native_code_counters && counter->Enabled()) {
4081 li(scratch2, Operand(ExternalReference(counter)));
4082 lw(scratch1, MemOperand(scratch2));
4083 Subu(scratch1, scratch1, Operand(value));
4084 sw(scratch1, MemOperand(scratch2));
4085 }
Andrei Popescu31002712010-02-23 13:46:05 +00004086}
4087
4088
Steve Block6ded16b2010-05-10 14:33:55 +01004089// -----------------------------------------------------------------------------
Ben Murdoch257744e2011-11-30 15:57:28 +00004090// Debugging.
Andrei Popescu31002712010-02-23 13:46:05 +00004091
4092void MacroAssembler::Assert(Condition cc, const char* msg,
4093 Register rs, Operand rt) {
Ben Murdoch257744e2011-11-30 15:57:28 +00004094 if (emit_debug_code())
Steve Block44f0eee2011-05-26 01:26:41 +01004095 Check(cc, msg, rs, rt);
4096}
4097
4098
4099void MacroAssembler::AssertRegisterIsRoot(Register reg,
4100 Heap::RootListIndex index) {
Ben Murdoch257744e2011-11-30 15:57:28 +00004101 if (emit_debug_code()) {
Steve Block44f0eee2011-05-26 01:26:41 +01004102 LoadRoot(at, index);
4103 Check(eq, "Register did not match expected root", reg, Operand(at));
4104 }
4105}
4106
4107
4108void MacroAssembler::AssertFastElements(Register elements) {
Ben Murdoch257744e2011-11-30 15:57:28 +00004109 if (emit_debug_code()) {
Steve Block44f0eee2011-05-26 01:26:41 +01004110 ASSERT(!elements.is(at));
4111 Label ok;
Ben Murdoch257744e2011-11-30 15:57:28 +00004112 push(elements);
Steve Block44f0eee2011-05-26 01:26:41 +01004113 lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
4114 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4115 Branch(&ok, eq, elements, Operand(at));
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004116 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4117 Branch(&ok, eq, elements, Operand(at));
Steve Block44f0eee2011-05-26 01:26:41 +01004118 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4119 Branch(&ok, eq, elements, Operand(at));
4120 Abort("JSObject with fast elements map has slow elements");
4121 bind(&ok);
Ben Murdoch257744e2011-11-30 15:57:28 +00004122 pop(elements);
Steve Block44f0eee2011-05-26 01:26:41 +01004123 }
Andrei Popescu31002712010-02-23 13:46:05 +00004124}
4125
4126
4127void MacroAssembler::Check(Condition cc, const char* msg,
4128 Register rs, Operand rt) {
Steve Block44f0eee2011-05-26 01:26:41 +01004129 Label L;
4130 Branch(&L, cc, rs, rt);
4131 Abort(msg);
Ben Murdoch257744e2011-11-30 15:57:28 +00004132 // Will not return here.
Steve Block44f0eee2011-05-26 01:26:41 +01004133 bind(&L);
Andrei Popescu31002712010-02-23 13:46:05 +00004134}
4135
4136
4137void MacroAssembler::Abort(const char* msg) {
Steve Block44f0eee2011-05-26 01:26:41 +01004138 Label abort_start;
4139 bind(&abort_start);
4140 // We want to pass the msg string like a smi to avoid GC
4141  // problems; however, msg is not guaranteed to be aligned
4142 // properly. Instead, we pass an aligned pointer that is
4143 // a proper v8 smi, but also pass the alignment difference
4144 // from the real pointer as a smi.
4145 intptr_t p1 = reinterpret_cast<intptr_t>(msg);
4146 intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
4147 ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
4148#ifdef DEBUG
4149 if (msg != NULL) {
4150 RecordComment("Abort message: ");
4151 RecordComment(msg);
4152 }
4153#endif
Steve Block44f0eee2011-05-26 01:26:41 +01004154
4155 li(a0, Operand(p0));
Ben Murdoch257744e2011-11-30 15:57:28 +00004156 push(a0);
Steve Block44f0eee2011-05-26 01:26:41 +01004157 li(a0, Operand(Smi::FromInt(p1 - p0)));
Ben Murdoch257744e2011-11-30 15:57:28 +00004158 push(a0);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004159 // Disable stub call restrictions to always allow calls to abort.
4160 if (!has_frame_) {
4161 // We don't actually want to generate a pile of code for this, so just
4162 // claim there is a stack frame, without generating one.
4163 FrameScope scope(this, StackFrame::NONE);
4164 CallRuntime(Runtime::kAbort, 2);
4165 } else {
4166 CallRuntime(Runtime::kAbort, 2);
4167 }
Ben Murdoch257744e2011-11-30 15:57:28 +00004168 // Will not return here.
Steve Block44f0eee2011-05-26 01:26:41 +01004169 if (is_trampoline_pool_blocked()) {
4170 // If the calling code cares about the exact number of
4171 // instructions generated, we insert padding here to keep the size
4172 // of the Abort macro constant.
4173 // Currently in debug mode with debug_code enabled the number of
4174 // generated instructions is 14, so we use this as a maximum value.
4175 static const int kExpectedAbortInstructions = 14;
4176 int abort_instructions = InstructionsGeneratedSince(&abort_start);
4177 ASSERT(abort_instructions <= kExpectedAbortInstructions);
4178 while (abort_instructions++ < kExpectedAbortInstructions) {
4179 nop();
4180 }
4181 }
4182}
4183
4184
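// A worked sketch (illustrative) of the smi encoding used in Abort above:
// align the message pointer down to a smi-safe value and pass the dropped
// low bits separately, so the GC sees two valid smis on the stack.
static void EncodeAbortMessage(intptr_t p1, intptr_t* p0, int* delta) {
  *p0 = (p1 & ~kSmiTagMask) + kSmiTag;  // Tag bit cleared: a valid smi.
  *delta = static_cast<int>(p1 - *p0);  // 0 or 1, pushed as Smi::FromInt.
}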
4185void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4186 if (context_chain_length > 0) {
4187 // Move up the chain of contexts to the context containing the slot.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004188 lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
Steve Block44f0eee2011-05-26 01:26:41 +01004189 for (int i = 1; i < context_chain_length; i++) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004190 lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
Steve Block44f0eee2011-05-26 01:26:41 +01004191 }
Ben Murdoch257744e2011-11-30 15:57:28 +00004192 } else {
4193 // Slot is in the current function context. Move it into the
4194 // destination register in case we store into it (the write barrier
4195  // cannot be allowed to destroy the context in cp).
4196 Move(dst, cp);
4197 }
Steve Block44f0eee2011-05-26 01:26:41 +01004198}
4199
4200
4201void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4202 // Load the global or builtins object from the current context.
4203 lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4204 // Load the global context from the global or builtins object.
4205 lw(function, FieldMemOperand(function,
4206 GlobalObject::kGlobalContextOffset));
4207 // Load the function from the global context.
4208 lw(function, MemOperand(function, Context::SlotOffset(index)));
4209}
4210
4211
4212void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4213 Register map,
4214 Register scratch) {
4215 // Load the initial map. The global functions all have initial maps.
4216 lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
Ben Murdoch257744e2011-11-30 15:57:28 +00004217 if (emit_debug_code()) {
Steve Block44f0eee2011-05-26 01:26:41 +01004218 Label ok, fail;
Ben Murdoch257744e2011-11-30 15:57:28 +00004219 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
Steve Block44f0eee2011-05-26 01:26:41 +01004220 Branch(&ok);
4221 bind(&fail);
4222 Abort("Global functions must have initial map");
4223 bind(&ok);
4224 }
Andrei Popescu31002712010-02-23 13:46:05 +00004225}
4226
Steve Block6ded16b2010-05-10 14:33:55 +01004227
4228void MacroAssembler::EnterFrame(StackFrame::Type type) {
4229 addiu(sp, sp, -5 * kPointerSize);
Steve Block44f0eee2011-05-26 01:26:41 +01004230 li(t8, Operand(Smi::FromInt(type)));
4231 li(t9, Operand(CodeObject()));
Steve Block6ded16b2010-05-10 14:33:55 +01004232 sw(ra, MemOperand(sp, 4 * kPointerSize));
4233 sw(fp, MemOperand(sp, 3 * kPointerSize));
4234 sw(cp, MemOperand(sp, 2 * kPointerSize));
Steve Block44f0eee2011-05-26 01:26:41 +01004235 sw(t8, MemOperand(sp, 1 * kPointerSize));
4236 sw(t9, MemOperand(sp, 0 * kPointerSize));
Steve Block6ded16b2010-05-10 14:33:55 +01004237 addiu(fp, sp, 3 * kPointerSize);
4238}
4239
4240
4241void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4242 mov(sp, fp);
4243 lw(fp, MemOperand(sp, 0 * kPointerSize));
4244 lw(ra, MemOperand(sp, 1 * kPointerSize));
4245 addiu(sp, sp, 2 * kPointerSize);
4246}
4247
4248
Ben Murdoch257744e2011-11-30 15:57:28 +00004249void MacroAssembler::EnterExitFrame(bool save_doubles,
4250 int stack_space) {
4251 // Setup the frame structure on the stack.
4252 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
4253 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
4254 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
Steve Block6ded16b2010-05-10 14:33:55 +01004255
Ben Murdoch257744e2011-11-30 15:57:28 +00004256 // This is how the stack will look:
4257 // fp + 2 (==kCallerSPDisplacement) - old stack's end
4258 // [fp + 1 (==kCallerPCOffset)] - saved old ra
4259 // [fp + 0 (==kCallerFPOffset)] - saved old fp
4260 // [fp - 1 (==kSPOffset)] - sp of the called function
4261 // [fp - 2 (==kCodeOffset)] - CodeObject
4262 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
4263 // new stack (will contain saved ra)
Steve Block6ded16b2010-05-10 14:33:55 +01004264
4265 // Save registers.
Ben Murdoch257744e2011-11-30 15:57:28 +00004266 addiu(sp, sp, -4 * kPointerSize);
4267 sw(ra, MemOperand(sp, 3 * kPointerSize));
4268 sw(fp, MemOperand(sp, 2 * kPointerSize));
4269  addiu(fp, sp, 2 * kPointerSize);  // Set up new frame pointer.
Steve Block6ded16b2010-05-10 14:33:55 +01004270
Ben Murdoch257744e2011-11-30 15:57:28 +00004271 if (emit_debug_code()) {
4272 sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
4273 }
4274
4275 li(t8, Operand(CodeObject())); // Accessed from ExitFrame::code_slot.
4276 sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
Steve Block6ded16b2010-05-10 14:33:55 +01004277
4278 // Save the frame pointer and the context in top.
Ben Murdoch589d6972011-11-30 16:04:58 +00004279 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
Steve Block44f0eee2011-05-26 01:26:41 +01004280 sw(fp, MemOperand(t8));
Ben Murdoch589d6972011-11-30 16:04:58 +00004281 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
Steve Block44f0eee2011-05-26 01:26:41 +01004282 sw(cp, MemOperand(t8));
Steve Block6ded16b2010-05-10 14:33:55 +01004283
Ben Murdoch257744e2011-11-30 15:57:28 +00004284 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
Steve Block44f0eee2011-05-26 01:26:41 +01004285 if (save_doubles) {
Ben Murdoch257744e2011-11-30 15:57:28 +00004286  // The stack must be aligned to 0 modulo 8 for stores with sdc1.
Steve Block44f0eee2011-05-26 01:26:41 +01004287 ASSERT(kDoubleSize == frame_alignment);
Ben Murdoch257744e2011-11-30 15:57:28 +00004288 if (frame_alignment > 0) {
4289 ASSERT(IsPowerOf2(frame_alignment));
4290 And(sp, sp, Operand(-frame_alignment)); // Align stack.
4291 }
4292 int space = FPURegister::kNumRegisters * kDoubleSize;
Steve Block44f0eee2011-05-26 01:26:41 +01004293 Subu(sp, sp, Operand(space));
4294 // Remember: we only need to save every 2nd double FPU value.
4295 for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
4296 FPURegister reg = FPURegister::from_code(i);
Ben Murdoch257744e2011-11-30 15:57:28 +00004297 sdc1(reg, MemOperand(sp, i * kDoubleSize));
Steve Block44f0eee2011-05-26 01:26:41 +01004298 }
Steve Block44f0eee2011-05-26 01:26:41 +01004299 }
Ben Murdoch257744e2011-11-30 15:57:28 +00004300
4301  // Reserve space for the return address, stack space and an optional slot
4302 // (used by the DirectCEntryStub to hold the return value if a struct is
4303 // returned) and align the frame preparing for calling the runtime function.
4304 ASSERT(stack_space >= 0);
4305 Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
4306 if (frame_alignment > 0) {
4307 ASSERT(IsPowerOf2(frame_alignment));
4308 And(sp, sp, Operand(-frame_alignment)); // Align stack.
4309 }
4310
4311 // Set the exit frame sp value to point just before the return address
4312 // location.
4313 addiu(at, sp, kPointerSize);
4314 sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
Steve Block6ded16b2010-05-10 14:33:55 +01004315}
4316
4317
Ben Murdoch257744e2011-11-30 15:57:28 +00004318void MacroAssembler::LeaveExitFrame(bool save_doubles,
4319 Register argument_count) {
Steve Block44f0eee2011-05-26 01:26:41 +01004320 // Optionally restore all double registers.
4321 if (save_doubles) {
Steve Block44f0eee2011-05-26 01:26:41 +01004322 // Remember: we only need to restore every 2nd double FPU value.
Ben Murdoch257744e2011-11-30 15:57:28 +00004323 lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
Steve Block44f0eee2011-05-26 01:26:41 +01004324 for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
4325 FPURegister reg = FPURegister::from_code(i);
Ben Murdoch257744e2011-11-30 15:57:28 +00004326 ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
Steve Block44f0eee2011-05-26 01:26:41 +01004327 }
4328 }
4329
Steve Block6ded16b2010-05-10 14:33:55 +01004330 // Clear top frame.
Ben Murdoch589d6972011-11-30 16:04:58 +00004331 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
Steve Block44f0eee2011-05-26 01:26:41 +01004332 sw(zero_reg, MemOperand(t8));
Steve Block6ded16b2010-05-10 14:33:55 +01004333
4334 // Restore current context from top and clear it in debug mode.
Ben Murdoch589d6972011-11-30 16:04:58 +00004335 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
Steve Block44f0eee2011-05-26 01:26:41 +01004336 lw(cp, MemOperand(t8));
Steve Block6ded16b2010-05-10 14:33:55 +01004337#ifdef DEBUG
Steve Block44f0eee2011-05-26 01:26:41 +01004338 sw(a3, MemOperand(t8));
Steve Block6ded16b2010-05-10 14:33:55 +01004339#endif
4340
4341 // Pop the arguments, restore registers, and return.
4342 mov(sp, fp); // Respect ABI stack constraint.
Ben Murdoch257744e2011-11-30 15:57:28 +00004343 lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
4344 lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
4345 addiu(sp, sp, 8);
4346 if (argument_count.is_valid()) {
4347 sll(t8, argument_count, kPointerSizeLog2);
4348 addu(sp, sp, t8);
4349 }
Steve Block6ded16b2010-05-10 14:33:55 +01004350}
4351
4352
Steve Block44f0eee2011-05-26 01:26:41 +01004353void MacroAssembler::InitializeNewString(Register string,
4354 Register length,
4355 Heap::RootListIndex map_index,
4356 Register scratch1,
4357 Register scratch2) {
4358 sll(scratch1, length, kSmiTagSize);
4359 LoadRoot(scratch2, map_index);
4360 sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
4361 li(scratch1, Operand(String::kEmptyHashField));
4362 sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
4363 sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
4364}
4365
4366
4367int MacroAssembler::ActivationFrameAlignment() {
4368#if defined(V8_HOST_ARCH_MIPS)
4369 // Running on the real platform. Use the alignment as mandated by the local
4370 // environment.
4371 // Note: This will break if we ever start generating snapshots on one Mips
4372 // platform for another Mips platform with a different alignment.
4373 return OS::ActivationFrameAlignment();
4374#else // defined(V8_HOST_ARCH_MIPS)
4375 // If we are using the simulator then we should always align to the expected
4376 // alignment. As the simulator is used to generate snapshots we do not know
4377 // if the target platform will need alignment, so this is controlled from a
4378 // flag.
4379 return FLAG_sim_stack_alignment;
4380#endif // defined(V8_HOST_ARCH_MIPS)
4381}
4382
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004383
Ben Murdoch257744e2011-11-30 15:57:28 +00004384void MacroAssembler::AssertStackIsAligned() {
4385 if (emit_debug_code()) {
4386 const int frame_alignment = ActivationFrameAlignment();
4387 const int frame_alignment_mask = frame_alignment - 1;
Steve Block44f0eee2011-05-26 01:26:41 +01004388
Ben Murdoch257744e2011-11-30 15:57:28 +00004389 if (frame_alignment > kPointerSize) {
4390 Label alignment_as_expected;
4391 ASSERT(IsPowerOf2(frame_alignment));
4392 andi(at, sp, frame_alignment_mask);
4393 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
4394      // Don't use Check here, as it will call Runtime_Abort and re-enter here.
4395 stop("Unexpected stack alignment");
4396 bind(&alignment_as_expected);
4397 }
Steve Block6ded16b2010-05-10 14:33:55 +01004398 }
Steve Block6ded16b2010-05-10 14:33:55 +01004399}
4400
Steve Block44f0eee2011-05-26 01:26:41 +01004401
Steve Block44f0eee2011-05-26 01:26:41 +01004402void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
4403 Register reg,
4404 Register scratch,
4405 Label* not_power_of_two_or_zero) {
4406 Subu(scratch, reg, Operand(1));
4407 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
4408 scratch, Operand(zero_reg));
4409 and_(at, scratch, reg); // In the delay slot.
4410 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
4411}
4412
4413
4414void MacroAssembler::JumpIfNotBothSmi(Register reg1,
4415 Register reg2,
4416 Label* on_not_both_smi) {
4417 STATIC_ASSERT(kSmiTag == 0);
4418 ASSERT_EQ(1, kSmiTagMask);
4419 or_(at, reg1, reg2);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004420 JumpIfNotSmi(at, on_not_both_smi);
Steve Block44f0eee2011-05-26 01:26:41 +01004421}
4422
4423
4424void MacroAssembler::JumpIfEitherSmi(Register reg1,
4425 Register reg2,
4426 Label* on_either_smi) {
4427 STATIC_ASSERT(kSmiTag == 0);
4428 ASSERT_EQ(1, kSmiTagMask);
4429  // Both tag bits must be 1 (non-smi) for the and_ result to be a non-smi.
4430 and_(at, reg1, reg2);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004431 JumpIfSmi(at, on_either_smi);
Steve Block44f0eee2011-05-26 01:26:41 +01004432}
4433
4434
4435void MacroAssembler::AbortIfSmi(Register object) {
4436 STATIC_ASSERT(kSmiTag == 0);
4437 andi(at, object, kSmiTagMask);
4438 Assert(ne, "Operand is a smi", at, Operand(zero_reg));
4439}
4440
4441
4442void MacroAssembler::AbortIfNotSmi(Register object) {
4443 STATIC_ASSERT(kSmiTag == 0);
4444 andi(at, object, kSmiTagMask);
4445 Assert(eq, "Operand is a smi", at, Operand(zero_reg));
4446}


void MacroAssembler::AbortIfNotString(Register object) {
  STATIC_ASSERT(kSmiTag == 0);
  And(t0, object, Operand(kSmiTagMask));
  Assert(ne, "Operand is not a string", t0, Operand(zero_reg));
  push(object);
  lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
  lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
  Assert(lo, "Operand is not a string", object, Operand(FIRST_NONSTRING_TYPE));
  pop(object);
}


void MacroAssembler::AbortIfNotRootValue(Register src,
                                         Heap::RootListIndex root_value_index,
                                         const char* message) {
  ASSERT(!src.is(at));
  LoadRoot(at, root_value_index);
  Assert(eq, message, src, Operand(at));
}


void MacroAssembler::JumpIfNotHeapNumber(Register object,
                                         Register heap_number_map,
                                         Register scratch,
                                         Label* on_not_heap_number) {
  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
}


void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
    Register first,
    Register second,
    Register scratch1,
    Register scratch2,
    Label* failure) {
  // Test that both first and second are sequential ASCII strings.
  // Assume that they are non-smis.
  lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
  lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
  lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));

  JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
                                               scratch2,
                                               scratch1,
                                               scratch2,
                                               failure);
}


void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
                                                         Register second,
                                                         Register scratch1,
                                                         Register scratch2,
                                                         Label* failure) {
  // Check that neither is a smi.
  STATIC_ASSERT(kSmiTag == 0);
  And(scratch1, first, Operand(second));
  JumpIfSmi(scratch1, failure);
  JumpIfNonSmisNotBothSequentialAsciiStrings(first,
                                             second,
                                             scratch1,
                                             scratch2,
                                             failure);
}


void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
    Register first,
    Register second,
    Register scratch1,
    Register scratch2,
    Label* failure) {
  int kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
  ASSERT(kFlatAsciiStringTag <= 0xffff);  // Must fit in a 16-bit immediate.
  andi(scratch1, first, kFlatAsciiStringMask);
  Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
  andi(scratch2, second, kFlatAsciiStringMask);
  Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
}


void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
                                                            Register scratch,
                                                            Label* failure) {
  int kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
  And(scratch, type, Operand(kFlatAsciiStringMask));
  Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
}


static const int kRegisterPassedArguments = 4;

int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
                                              int num_double_arguments) {
  int stack_passed_words = 0;
  num_reg_arguments += 2 * num_double_arguments;

  // Up to four simple arguments are passed in registers a0..a3.
  if (num_reg_arguments > kRegisterPassedArguments) {
    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
  }
  stack_passed_words += kCArgSlotCount;
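  // Worked example, assuming the O32 convention where kCArgSlotCount is 4:
  // 2 register args + 2 double args count as 6 word-sized args, so
  // 6 - 4 = 2 words spill past a0..a3, and with the 4 reserved argument
  // slots this function returns 6.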
  return stack_passed_words;
}


void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          int num_double_arguments,
                                          Register scratch) {
  int frame_alignment = ActivationFrameAlignment();

  // Up to four simple arguments are passed in registers a0..a3.
  // Those four arguments must have reserved argument slots on the stack for
  // mips, even though those argument slots are not normally used.
  // Remaining arguments are pushed on the stack, above (higher address than)
  // the argument slots.
  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);
  if (frame_alignment > kPointerSize) {
    // Make stack end at alignment and make room for num_arguments - 4 words
    // and the original value of sp.
    mov(scratch, sp);
    Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
    ASSERT(IsPowerOf2(frame_alignment));
    And(sp, sp, Operand(-frame_alignment));
    sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}


void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          Register scratch) {
  PrepareCallCFunction(num_reg_arguments, 0, scratch);
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  li(t8, Operand(function));
  CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}


void MacroAssembler::CallCFunctionHelper(Register function,
                                         int num_reg_arguments,
                                         int num_double_arguments) {
  ASSERT(has_frame());
  // Make sure that the stack is aligned before calling a C function unless
  // running in the simulator. The simulator has its own alignment check which
  // provides more information.
  // The argument slots are presumed to have been set up by
  // PrepareCallCFunction. The C function must be called via t9, per the
  // MIPS ABI.

#if defined(V8_HOST_ARCH_MIPS)
  if (emit_debug_code()) {
    int frame_alignment = OS::ActivationFrameAlignment();
    int frame_alignment_mask = frame_alignment - 1;
    if (frame_alignment > kPointerSize) {
      ASSERT(IsPowerOf2(frame_alignment));
      Label alignment_as_expected;
      And(at, sp, Operand(frame_alignment_mask));
      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
      // Don't use Check here, as it will call Runtime_Abort, possibly
      // re-entering here.
      stop("Unexpected alignment in CallCFunction");
      bind(&alignment_as_expected);
    }
  }
#endif  // V8_HOST_ARCH_MIPS

  // Just call directly. The function called cannot cause a GC, or
  // allow preemption, so the return address in the link register
  // stays correct.

  if (!function.is(t9)) {
    mov(t9, function);
    function = t9;
  }

  Call(function);

  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);

  if (OS::ActivationFrameAlignment() > kPointerSize) {
    lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}


#undef BRANCH_ARGS_CHECK


void MacroAssembler::PatchRelocatedValue(Register li_location,
                                         Register scratch,
                                         Register new_value) {
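  // On MIPS, li() materializes a 32-bit constant as a two-instruction pair,
  //   lui(rd, value >> 16) followed by ori(rd, rd, value & 0xffff),
  // so patching amounts to rewriting the 16-bit immediate field of each.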
  lw(scratch, MemOperand(li_location));
  // At this point scratch is a lui(at, ...) instruction.
  if (emit_debug_code()) {
    And(scratch, scratch, kOpcodeMask);
    Check(eq, "The instruction to patch should be a lui.",
          scratch, Operand(LUI));
    lw(scratch, MemOperand(li_location));
  }
  srl(t9, new_value, kImm16Bits);
  Ins(scratch, t9, 0, kImm16Bits);
  sw(scratch, MemOperand(li_location));

  lw(scratch, MemOperand(li_location, kInstrSize));
  // scratch is now ori(at, ...).
  if (emit_debug_code()) {
    And(scratch, scratch, kOpcodeMask);
    Check(eq, "The instruction to patch should be an ori.",
          scratch, Operand(ORI));
    lw(scratch, MemOperand(li_location, kInstrSize));
  }
  Ins(scratch, new_value, 0, kImm16Bits);
  sw(scratch, MemOperand(li_location, kInstrSize));

  // Update the I-cache so the new lui and ori can be executed.
  FlushICache(li_location, 2);
}


void MacroAssembler::GetRelocatedValue(Register li_location,
                                       Register value,
                                       Register scratch) {
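  // Reverses the patching above: the constant is reassembled as
  // (lui immediate << 16) | (ori immediate).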
  lw(value, MemOperand(li_location));
  if (emit_debug_code()) {
    And(value, value, kOpcodeMask);
    Check(eq, "The instruction should be a lui.",
          value, Operand(LUI));
    lw(value, MemOperand(li_location));
  }

  // value now holds a lui instruction. Extract the immediate.
  sll(value, value, kImm16Bits);

  lw(scratch, MemOperand(li_location, kInstrSize));
  if (emit_debug_code()) {
    And(scratch, scratch, kOpcodeMask);
    Check(eq, "The instruction should be an ori.",
          scratch, Operand(ORI));
    lw(scratch, MemOperand(li_location, kInstrSize));
  }
  // scratch now holds an ori instruction. Extract the immediate.
  andi(scratch, scratch, kImm16Mask);

  // Merge the results.
  or_(value, value, scratch);
}


void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met) {
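  // Pages are power-of-two aligned, so clearing the low bits of the object's
  // address yields its MemoryChunk header, whose flags word is then tested.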
  And(scratch, object, Operand(~Page::kPageAlignmentMask));
  lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  And(scratch, scratch, Operand(mask));
  Branch(condition_met, cc, scratch, Operand(zero_reg));
}


void MacroAssembler::JumpIfBlack(Register object,
                                 Register scratch0,
                                 Register scratch1,
                                 Label* on_black) {
  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
}


void MacroAssembler::HasColor(Register object,
                              Register bitmap_scratch,
                              Register mask_scratch,
                              Label* has_color,
                              int first_bit,
                              int second_bit) {
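  // A color is a pair of consecutive mark bits. The first bit is compared
  // against first_bit; the mask is then doubled (a shift left by one) to test
  // the second bit, with a separate path for a pair that straddles a bitmap
  // cell boundary.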
  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9));

  GetMarkBits(object, bitmap_scratch, mask_scratch);

  Label other_color, word_boundary;
  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  And(t8, t9, Operand(mask_scratch));
  Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
  // Shift left 1 by adding.
  Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
  Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
  And(t8, t9, Operand(mask_scratch));
  Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
  jmp(&other_color);

  bind(&word_boundary);
  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
  And(t9, t9, Operand(1));
  Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
  bind(&other_color);
}


// Detect some, but not all, common pointer-free objects. This is used by the
// incremental write barrier which doesn't care about oddballs (they are always
// marked black immediately so this code is not hit).
void MacroAssembler::JumpIfDataObject(Register value,
                                      Register scratch,
                                      Label* not_data_object) {
  ASSERT(!AreAliased(value, scratch, t8, no_reg));
  Label is_data_object;
  lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
  Branch(&is_data_object, eq, t8, Operand(scratch));
  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
  Branch(not_data_object, ne, t8, Operand(zero_reg));
  bind(&is_data_object);
}


void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
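  // mask_reg receives a one-hot mask selecting this word's bit within its
  // bitmap cell; bitmap_reg receives the page address plus the byte offset
  // of that cell (MemoryChunk::kHeaderSize is added at the load sites).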
  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
  And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
  Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
  Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
  sll(t8, t8, kPointerSizeLog2);
  Addu(bitmap_reg, bitmap_reg, t8);
  li(t8, Operand(1));
  sllv(mask_reg, t8, mask_reg);
}


void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register mask_scratch,
    Register load_scratch,
    Label* value_is_white_and_not_data) {
  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  Label done;

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  And(t8, mask_scratch, load_scratch);
  Branch(&done, ne, t8, Operand(zero_reg));

  if (FLAG_debug_code) {
    // Check for impossible bit pattern.
    Label ok;
    // sll may overflow, making the check conservative.
    sll(t8, mask_scratch, 1);
    And(t8, load_scratch, t8);
    Branch(&ok, eq, t8, Operand(zero_reg));
    stop("Impossible marking bit pattern");
    bind(&ok);
  }

  // Value is white. We check whether it is data that doesn't need scanning.
  // Currently only checks for HeapNumber and non-cons strings.
  Register map = load_scratch;  // Holds map while checking type.
  Register length = load_scratch;  // Holds length of object after testing type.
  Label is_data_object;

  // Check for heap-number.
  lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
  {
    Label skip;
    Branch(&skip, ne, t8, Operand(map));
    li(length, HeapNumber::kSize);
    Branch(&is_data_object);
    bind(&skip);
  }

  // Check for strings.
  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  Register instance_type = load_scratch;
  lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
  And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
  Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  // External strings are the only ones with the kExternalStringTag bit
  // set.
  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
  And(t8, instance_type, Operand(kExternalStringTag));
  {
    Label skip;
    Branch(&skip, eq, t8, Operand(zero_reg));
    li(length, ExternalString::kSize);
    Branch(&is_data_object);
    bind(&skip);
  }

  // Sequential string, either ASCII or UC16.
  // For ASCII (char-size of 1) we shift the smi tag away to get the length.
  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
  // getting the length multiplied by 2.
  ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
  lw(t9, FieldMemOperand(value, String::kLengthOffset));
  And(t8, instance_type, Operand(kStringEncodingMask));
  {
    Label skip;
    Branch(&skip, eq, t8, Operand(zero_reg));
    srl(t9, t9, 1);
    bind(&skip);
  }
  Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
  And(length, length, Operand(~kObjectAlignmentMask));
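  // Example: a 10-character string has length smi 20 (10 << 1). For ASCII
  // the shift above yields 10 payload bytes; for UC16 the untouched smi
  // value 20 is already the byte count. The header size is then added and
  // the total rounded up to the object alignment.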

  bind(&is_data_object);
  // Value is a data object, and it is white. Mark it black. Since we know
  // that the object is white we can make it black by flipping one bit.
  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  Or(t8, t8, Operand(mask_scratch));
  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));

  And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
  Addu(t8, t8, Operand(length));
  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));

  bind(&done);
}


void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  lw(descriptors,
     FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
  Label not_smi;
  JumpIfNotSmi(descriptors, &not_smi);
  li(descriptors, Operand(FACTORY->empty_descriptor_array()));
  bind(&not_smi);
}


void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
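  // Clamps a signed 32-bit value to [0, 255]: e.g. -5 -> 0, 300 -> 255,
  // 128 -> 128.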
  ASSERT(!output_reg.is(input_reg));
  Label done;
  li(output_reg, Operand(255));
  // Normal branch: nop in delay slot.
  Branch(&done, gt, input_reg, Operand(output_reg));
  // Use delay slot in this branch.
  Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
  mov(output_reg, zero_reg);  // In delay slot.
  mov(output_reg, input_reg);  // Value is in range 0..255.
  bind(&done);
}


void MacroAssembler::ClampDoubleToUint8(Register result_reg,
                                        DoubleRegister input_reg,
                                        DoubleRegister temp_double_reg) {
  Label above_zero;
  Label done;
  Label in_bounds;

  Move(temp_double_reg, 0.0);
  BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);

  // Double value is <= 0 or NaN: return 0.
  mov(result_reg, zero_reg);
  Branch(&done);

  // Double value is > 255: return 255.
  bind(&above_zero);
  Move(temp_double_reg, 255.0);
  BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
  li(result_reg, Operand(255));
  Branch(&done);

  // In 0-255 range, round and truncate.
  bind(&in_bounds);
  round_w_d(temp_double_reg, input_reg);
  mfc1(result_reg, temp_double_reg);
  bind(&done);
}


bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
  if (r1.is(r2)) return true;
  if (r1.is(r3)) return true;
  if (r1.is(r4)) return true;
  if (r2.is(r3)) return true;
  if (r2.is(r4)) return true;
  if (r3.is(r4)) return true;
  return false;
}

CodePatcher::CodePatcher(byte* address, int instructions)
    : address_(address),
      instructions_(instructions),
      size_(instructions * Assembler::kInstrSize),
      masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
  // bytes of instructions without failing with buffer size constraints.
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  CPU::FlushICache(address_, size_);

  // Check that the code was patched as expected.
  ASSERT(masm_.pc_ == address_ + size_);
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


void CodePatcher::Emit(Instr instr) {
  masm()->emit(instr);
}


void CodePatcher::Emit(Address addr) {
  masm()->emit(reinterpret_cast<Instr>(addr));
}


void CodePatcher::ChangeBranchCondition(Condition cond) {
  Instr instr = Assembler::instr_at(masm_.pc_);
  ASSERT(Assembler::IsBranch(instr));
  uint32_t opcode = Assembler::GetOpcodeField(instr);
  // Currently only 'eq' and 'ne' are supported, and only for the simple
  // branch instructions (where the opcode encodes the branch type).
  // There are some special cases (see Assembler::IsBranch()) so extending
  // this would be tricky.
  ASSERT(opcode == BEQ ||
         opcode == BNE ||
         opcode == BLEZ ||
         opcode == BGTZ ||
         opcode == BEQL ||
         opcode == BNEL ||
         opcode == BLEZL ||
         opcode == BGTZL);
  opcode = (cond == eq) ? BEQ : BNE;
  instr = (instr & ~kOpcodeMask) | opcode;
  masm_.emit(instr);
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS