// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <limits.h>  // For LONG_MIN, LONG_MAX.

#include "v8.h"

#if defined(V8_TARGET_ARCH_MIPS)

#include "bootstrapper.h"
#include "codegen.h"
#include "debug.h"
#include "runtime.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      allow_stub_calls_(true),
      has_frame_(false) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}


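// LoadRoot/StoreRoot below address the root list relative to s6, which the
// MIPS port reserves as the roots-array pointer, so a root access is a single
// lw/sw at s6 + (index << kPointerSizeLog2). Illustrative use (register and
// root index chosen only as an example):
//   LoadRoot(at, Heap::kUndefinedValueRootIndex);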
void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index) {
  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Condition cond,
                              Register src1, const Operand& src2) {
  Branch(2, NegateCondition(cond), src1, src2);
  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index) {
  sw(source, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index,
                               Condition cond,
                               Register src1, const Operand& src2) {
  Branch(2, NegateCondition(cond), src1, src2);
  sw(source, MemOperand(s6, index << kPointerSizeLog2));
}


void MacroAssembler::LoadHeapObject(Register result,
                                    Handle<HeapObject> object) {
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<JSGlobalPropertyCell> cell =
        isolate()->factory()->NewJSGlobalPropertyCell(object);
    li(result, Operand(cell));
    lw(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
  } else {
    li(result, Operand(object));
  }
}

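// LoadHeapObject (above) references new-space objects through a
// JSGlobalPropertyCell rather than embedding the pointer directly: an object
// pointer embedded in generated code would not be updated when the scavenger
// moves the object, whereas the cell's value slot is kept current by the GC.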

// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of kNumSafepointRegisters values on the
  // stack, so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  ASSERT(num_unsaved >= 0);
  if (num_unsaved > 0) {
    Subu(sp, sp, Operand(num_unsaved * kPointerSize));
  }
  MultiPush(kSafepointSavedRegisters);
}


void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  MultiPop(kSafepointSavedRegisters);
  if (num_unsaved > 0) {
    Addu(sp, sp, Operand(num_unsaved * kPointerSize));
  }
}


void MacroAssembler::PushSafepointRegistersAndDoubles() {
  PushSafepointRegisters();
  Subu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i += 2) {
    FPURegister reg = FPURegister::FromAllocationIndex(i);
    sdc1(reg, MemOperand(sp, i * kDoubleSize));
  }
}


void MacroAssembler::PopSafepointRegistersAndDoubles() {
  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i += 2) {
    FPURegister reg = FPURegister::FromAllocationIndex(i);
    ldc1(reg, MemOperand(sp, i * kDoubleSize));
  }
  Addu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
  PopSafepointRegisters();
}


void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
                                                             Register dst) {
  sw(src, SafepointRegistersAndDoublesSlot(dst));
}


void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  sw(src, SafepointRegisterSlot(dst));
}


void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  lw(dst, SafepointRegisterSlot(src));
}


int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.
  return kSafepointRegisterStackIndexMap[reg_code];
}


MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}


MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
  UNIMPLEMENTED_MIPS();
  // General purpose registers are pushed last on the stack.
  int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
  return MemOperand(sp, doubles_size + register_offset);
}


void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cc,
                                Label* branch) {
  ASSERT(cc == eq || cc == ne);
  And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
  Branch(branch, cc, scratch,
         Operand(ExternalReference::new_space_start(isolate())));
}


void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register dst,
    RAStatus ra_status,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check) {
  ASSERT(!AreAliased(value, dst, t8, object));
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  ASSERT(IsAligned(offset, kPointerSize));

  Addu(dst, object, Operand(offset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
    Branch(&ok, eq, t8, Operand(zero_reg));
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  RecordWrite(object,
              dst,
              value,
              ra_status,
              save_fp,
              remembered_set_action,
              OMIT_SMI_CHECK);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(value, Operand(BitCast<int32_t>(kZapValue + 4)));
    li(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
  }
}

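// A minimal usage sketch for RecordWriteField (registers and the field offset
// here are illustrative only, not prescribed by this file): after storing a
// tagged value into an object field, emit the barrier for that field, e.g.
//   sw(a0, FieldMemOperand(a1, JSObject::kPropertiesOffset));
//   RecordWriteField(a1, JSObject::kPropertiesOffset, a0, a2,
//                    kRAHasNotBeenSaved, kDontSaveFPRegs,
//                    EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);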

// Will clobber the 'address' and 'value' registers (t8 and t9 are also used
// as scratch). The register 'object' contains a heap object pointer. The
// heap object tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
                                 Register address,
                                 Register value,
                                 RAStatus ra_status,
                                 SaveFPRegsMode fp_mode,
                                 RememberedSetAction remembered_set_action,
                                 SmiCheck smi_check) {
  ASSERT(!AreAliased(object, address, value, t8));
  ASSERT(!AreAliased(object, address, value, t9));
  // The compiled code assumes that record write doesn't change the
  // context register, so we check that none of the clobbered
  // registers are cp.
  ASSERT(!address.is(cp) && !value.is(cp));

  if (emit_debug_code()) {
    lw(at, MemOperand(address));
    Assert(
        eq, "Wrong address or value passed to RecordWrite", at, Operand(value));
  }

  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    ASSERT_EQ(0, kSmiTag);
    JumpIfSmi(value, &done);
  }

  CheckPageFlag(value,
                value,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask,
                eq,
                &done);
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                eq,
                &done);

  // Record the actual write.
  if (ra_status == kRAHasNotBeenSaved) {
    push(ra);
  }
  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
  CallStub(&stub);
  if (ra_status == kRAHasNotBeenSaved) {
    pop(ra);
  }

  bind(&done);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(address, Operand(BitCast<int32_t>(kZapValue + 12)));
    li(value, Operand(BitCast<int32_t>(kZapValue + 16)));
  }
}


void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                         Register address,
                                         Register scratch,
                                         SaveFPRegsMode fp_mode,
                                         RememberedSetFinalAction and_then) {
  Label done;
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok);
    stop("Remembered set pointer is in new space");
    bind(&ok);
  }
  // Load store buffer top.
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  li(t8, Operand(store_buffer));
  lw(scratch, MemOperand(t8));
  // Store pointer to buffer and increment buffer top.
  sw(address, MemOperand(scratch));
  Addu(scratch, scratch, kPointerSize);
  // Write back new top of buffer.
  sw(scratch, MemOperand(t8));
  // Call stub on end of buffer.
  // Check for end of buffer.
  And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
  if (and_then == kFallThroughAtEnd) {
    Branch(&done, eq, t8, Operand(zero_reg));
  } else {
    ASSERT(and_then == kReturnAtEnd);
    Ret(eq, t8, Operand(zero_reg));
  }
  push(ra);
  StoreBufferOverflowStub store_buffer_overflow =
      StoreBufferOverflowStub(fp_mode);
  CallStub(&store_buffer_overflow);
  pop(ra);
  bind(&done);
  if (and_then == kReturnAtEnd) {
    Ret();
  }
}


// -----------------------------------------------------------------------------
// Allocation support.


void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch,
                                            Label* miss) {
  Label same_contexts;

  ASSERT(!holder_reg.is(scratch));
  ASSERT(!holder_reg.is(at));
  ASSERT(!scratch.is(at));

  // Load current lexical context from the stack frame.
  lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
  // In debug mode, make sure the lexical context is set.
#ifdef DEBUG
  Check(ne, "we should not have an empty lexical context",
        scratch, Operand(zero_reg));
#endif

  // Load the global context of the current context.
  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
  lw(scratch, FieldMemOperand(scratch, offset));
  lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));

  // Check the context is a global context.
  if (emit_debug_code()) {
    // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
    push(holder_reg);  // Temporarily save holder on the stack.
    // Read the first word and compare to the global_context_map.
    lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
    LoadRoot(at, Heap::kGlobalContextMapRootIndex);
    Check(eq, "JSGlobalObject::global_context should be a global context.",
          holder_reg, Operand(at));
    pop(holder_reg);  // Restore holder.
  }

  // Check if both contexts are the same.
  lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
  Branch(&same_contexts, eq, scratch, Operand(at));

  // Check the context is a global context.
  if (emit_debug_code()) {
    // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
    push(holder_reg);  // Temporarily save holder on the stack.
    mov(holder_reg, at);  // Move at to its holding place.
    LoadRoot(at, Heap::kNullValueRootIndex);
    Check(ne, "JSGlobalProxy::context() should not be null.",
          holder_reg, Operand(at));

    lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
    LoadRoot(at, Heap::kGlobalContextMapRootIndex);
    Check(eq, "JSGlobalObject::global_context should be a global context.",
          holder_reg, Operand(at));
    // Restoring at is not needed. at is reloaded below.
    pop(holder_reg);  // Restore holder.
    // Restore at to holder's context.
    lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
  }

  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.
  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;

  lw(scratch, FieldMemOperand(scratch, token_offset));
  lw(at, FieldMemOperand(at, token_offset));
  Branch(miss, ne, scratch, Operand(at));

  bind(&same_contexts);
}


void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
  // First of all we assign the hash seed to scratch.
  LoadRoot(scratch, Heap::kHashSeedRootIndex);
  SmiUntag(scratch);

  // Xor original key with a seed.
  xor_(reg0, reg0, scratch);

  // Compute the hash code from the untagged key. This must be kept in sync
  // with ComputeIntegerHash in utils.h.
  //
  // hash = ~hash + (hash << 15);
  nor(scratch, reg0, zero_reg);
  sll(at, reg0, 15);
  addu(reg0, scratch, at);

  // hash = hash ^ (hash >> 12);
  srl(at, reg0, 12);
  xor_(reg0, reg0, at);

  // hash = hash + (hash << 2);
  sll(at, reg0, 2);
  addu(reg0, reg0, at);

  // hash = hash ^ (hash >> 4);
  srl(at, reg0, 4);
  xor_(reg0, reg0, at);

  // hash = hash * 2057;
  sll(scratch, reg0, 11);
  sll(at, reg0, 3);
  addu(reg0, reg0, at);
  addu(reg0, reg0, scratch);

  // hash = hash ^ (hash >> 16);
  srl(at, reg0, 16);
  xor_(reg0, reg0, at);
}

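// For reference, the scalar computation implemented by the instruction
// sequence above (a sketch of ComputeIntegerHash from utils.h):
//   hash = key ^ seed;
//   hash = ~hash + (hash << 15);
//   hash = hash ^ (hash >> 12);
//   hash = hash + (hash << 2);
//   hash = hash ^ (hash >> 4);
//   hash = hash * 2057;   // i.e. hash + (hash << 3) + (hash << 11)
//   hash = hash ^ (hash >> 16);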

void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                              Register elements,
                                              Register key,
                                              Register result,
                                              Register reg0,
                                              Register reg1,
                                              Register reg2) {
  // Register use:
  //
  // elements - holds the slow-case elements of the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'key' or 'result'.
  //            Unchanged on bailout so 'key' or 'result' can be used
  //            in further computation.
  //
  // Scratch registers:
  //
  // reg0 - holds the untagged key on entry and holds the hash once computed.
  //
  // reg1 - Used to hold the capacity mask of the dictionary.
  //
  // reg2 - Used for the index into the dictionary.
  // at   - Temporary (avoid MacroAssembler instructions also using 'at').
  Label done;

  GetNumberHash(reg0, reg1);

  // Compute the capacity mask.
  lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
  sra(reg1, reg1, kSmiTagSize);
  Subu(reg1, reg1, Operand(1));

  // Generate an unrolled loop that performs a few probes before giving up.
  static const int kProbes = 4;
  for (int i = 0; i < kProbes; i++) {
    // Use reg2 for index calculations and keep the hash intact in reg0.
    mov(reg2, reg0);
    // Compute the masked index: (hash + i + i * i) & mask.
    if (i > 0) {
      Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
    }
    and_(reg2, reg2, reg1);

    // Scale the index by multiplying by the element size.
    ASSERT(SeededNumberDictionary::kEntrySize == 3);
    sll(at, reg2, 1);  // 2x.
    addu(reg2, reg2, at);  // reg2 = reg2 * 3.

    // Check if the key is identical to the name.
    sll(at, reg2, kPointerSizeLog2);
    addu(reg2, elements, at);

    lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
    if (i != kProbes - 1) {
      Branch(&done, eq, key, Operand(at));
    } else {
      Branch(miss, ne, key, Operand(at));
    }
  }

  bind(&done);
  // Check that the value is a normal property.
  // reg2: elements + (index * kPointerSize).
  const int kDetailsOffset =
      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
  And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
  Branch(miss, ne, at, Operand(zero_reg));

  // Get the value at the masked, scaled index and return.
  const int kValueOffset =
      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
  lw(result, FieldMemOperand(reg2, kValueOffset));
}

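// Probe scheme used above, as a sketch: for probe i the entry index is
//   index = (hash + SeededNumberDictionary::GetProbeOffset(i)) & capacity_mask
// and every entry spans kEntrySize == 3 pointers (key, value, details), so the
// code scales the index by 3 before adding kElementsStartOffset. Only kProbes
// probes are inlined; the final probe branches to 'miss' on mismatch.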

// ---------------------------------------------------------------------------
// Instruction macros.

void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    addu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      addiu(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      addu(rd, rs, at);
    }
  }
}


void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    subu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      addiu(rd, rs, -rt.imm32_);  // No subiu instr, use addiu(x, y, -imm).
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      subu(rd, rs, at);
    }
  }
}


void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    mul(rd, rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    mul(rd, rs, at);
  }
}


void MacroAssembler::Mult(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    mult(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    mult(rs, at);
  }
}


void MacroAssembler::Multu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    multu(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    multu(rs, at);
  }
}


void MacroAssembler::Div(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    div(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    div(rs, at);
  }
}


void MacroAssembler::Divu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    divu(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    divu(rs, at);
  }
}


void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    and_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      andi(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      and_(rd, rs, at);
    }
  }
}


void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    or_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      ori(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      or_(rd, rs, at);
    }
  }
}


void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    xor_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      xori(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      xor_(rd, rs, at);
    }
  }
}


void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    nor(rd, rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    nor(rd, rs, at);
  }
}


void MacroAssembler::Neg(Register rs, const Operand& rt) {
  ASSERT(rt.is_reg());
  ASSERT(!at.is(rs));
  ASSERT(!at.is(rt.rm()));
  li(at, -1);
  xor_(rs, rt.rm(), at);
}


void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    slt(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      slti(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      slt(rd, rs, at);
    }
  }
}


void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    sltu(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
      sltiu(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      sltu(rd, rs, at);
    }
  }
}


void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
  if (mips32r2) {
    if (rt.is_reg()) {
      rotrv(rd, rs, rt.rm());
    } else {
      rotr(rd, rs, rt.imm32_);
    }
  } else {
    if (rt.is_reg()) {
      subu(at, zero_reg, rt.rm());
      sllv(at, rs, at);
      srlv(rd, rs, rt.rm());
      or_(rd, rd, at);
    } else {
      if (rt.imm32_ == 0) {
        srl(rd, rs, 0);
      } else {
        srl(at, rs, rt.imm32_);
        sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
        or_(rd, rd, at);
      }
    }
  }
}

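// On MIPS cores without rotr/rotrv (pre-R2), Ror above synthesizes the rotate
// from shifts using the usual identity (sketch):
//   ror(x, n) == (x >> n) | (x << ((32 - n) & 31))   // and x unchanged for n == 0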

//------------Pseudo-instructions-------------

void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
  ASSERT(!j.is_reg());
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (!MustUseReg(j.rmode_) && !gen2instr) {
    // Normal load of an immediate value which does not need Relocation Info.
    if (is_int16(j.imm32_)) {
      addiu(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & kHiMask)) {
      ori(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & kImm16Mask)) {
      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
    } else {
      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
      ori(rd, rd, (j.imm32_ & kImm16Mask));
    }
  } else if (MustUseReg(j.rmode_) || gen2instr) {
    if (MustUseReg(j.rmode_)) {
      RecordRelocInfo(j.rmode_, j.imm32_);
    }
    // We always need the same number of instructions as we may need to patch
    // this code to load another value which may need 2 instructions to load.
    lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
    ori(rd, rd, (j.imm32_ & kImm16Mask));
  }
}

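// When relocation info must be recorded (or gen2instr is set), li always emits
// the same two-instruction lui/ori pair so the load can later be patched in
// place. For example (value purely illustrative), loading 0x12345678 that way
// produces:
//   lui  rd, 0x1234
//   ori  rd, rd, 0x5678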

void MacroAssembler::MultiPush(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kPointerSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      sw(ToRegister(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPushReversed(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kPointerSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      sw(ToRegister(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPop(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      lw(ToRegister(i), MemOperand(sp, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  addiu(sp, sp, stack_offset);
}


void MacroAssembler::MultiPopReversed(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      lw(ToRegister(i), MemOperand(sp, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  addiu(sp, sp, stack_offset);
}


void MacroAssembler::MultiPushFPU(RegList regs) {
  CpuFeatures::Scope scope(FPU);
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kDoubleSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kDoubleSize;
      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPushReversedFPU(RegList regs) {
  CpuFeatures::Scope scope(FPU);
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kDoubleSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kDoubleSize;
      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
    }
  }
}


void MacroAssembler::MultiPopFPU(RegList regs) {
  CpuFeatures::Scope scope(FPU);
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
  addiu(sp, sp, stack_offset);
}


void MacroAssembler::MultiPopReversedFPU(RegList regs) {
  CpuFeatures::Scope scope(FPU);
  int16_t stack_offset = 0;

  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
  addiu(sp, sp, stack_offset);
}


void MacroAssembler::FlushICache(Register address, unsigned instructions) {
  RegList saved_regs = kJSCallerSaved | ra.bit();
  MultiPush(saved_regs);
  AllowExternalCallThatCantCauseGC scope(this);

  // Save to a0 in case address == t0.
  Move(a0, address);
  PrepareCallCFunction(2, t0);

  li(a1, instructions * kInstrSize);
  CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
  MultiPop(saved_regs);
}


void MacroAssembler::Ext(Register rt,
                         Register rs,
                         uint16_t pos,
                         uint16_t size) {
  ASSERT(pos < 32);
  ASSERT(pos + size < 33);

  if (mips32r2) {
    ext_(rt, rs, pos, size);
  } else {
    // Move rs to rt and shift it left then right to get the
    // desired bitfield on the right side and zeroes on the left.
    int shift_left = 32 - (pos + size);
    sll(rt, rs, shift_left);  // Acts as a move if shift_left == 0.

    int shift_right = 32 - size;
    if (shift_right > 0) {
      srl(rt, rt, shift_right);
    }
  }
}


void MacroAssembler::Ins(Register rt,
                         Register rs,
                         uint16_t pos,
                         uint16_t size) {
  ASSERT(pos < 32);
  ASSERT(pos + size <= 32);
  ASSERT(size != 0);

  if (mips32r2) {
    ins_(rt, rs, pos, size);
  } else {
    ASSERT(!rt.is(t8) && !rs.is(t8));
    Subu(at, zero_reg, Operand(1));
    srl(at, at, 32 - size);
    and_(t8, rs, at);
    sll(t8, t8, pos);
    sll(at, at, pos);
    nor(at, at, zero_reg);
    and_(at, rt, at);
    or_(rt, t8, at);
  }
}

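// Without the ins_ instruction (pre-R2), Ins above builds the bit-field insert
// from a mask, roughly:
//   mask = (1 << size) - 1;
//   rt   = (rt & ~(mask << pos)) | ((rs & mask) << pos);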

void MacroAssembler::Cvt_d_uw(FPURegister fd,
                              FPURegister fs,
                              FPURegister scratch) {
  // Move the data from fs to t8.
  mfc1(t8, fs);
  Cvt_d_uw(fd, t8, scratch);
}


void MacroAssembler::Cvt_d_uw(FPURegister fd,
                              Register rs,
                              FPURegister scratch) {
  // Convert rs to a FP value in fd (and fd + 1).
  // We do this by converting rs minus the MSB to avoid sign conversion,
  // then adding 2^31 to the result (if needed).

  ASSERT(!fd.is(scratch));
  ASSERT(!rs.is(t9));
  ASSERT(!rs.is(at));

  // Save rs's MSB to t9.
  Ext(t9, rs, 31, 1);
  // Remove rs's MSB.
  Ext(at, rs, 0, 31);
  // Move the result to fd.
  mtc1(at, fd);

  // Convert fd to a real FP value.
  cvt_d_w(fd, fd);

  Label conversion_done;

  // If rs's MSB was 0, it's done.
  // Otherwise we need to add that to the FP register.
  Branch(&conversion_done, eq, t9, Operand(zero_reg));

  // Load 2^31 into scratch as its float representation.
  li(at, 0x41E00000);
  mtc1(at, FPURegister::from_code(scratch.code() + 1));
  mtc1(zero_reg, scratch);
  // Add it to fd.
  add_d(fd, fd, scratch);

  bind(&conversion_done);
}

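// Note on the constant above: 0x41E00000 is the high word of the IEEE-754
// double representation of 2^31 (the low word is zero). The conversion works
// around the lack of an unsigned FPU convert by converting the low 31 bits as
// a signed value and adding 2^31 back in when the MSB of rs was set.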

void MacroAssembler::Trunc_uw_d(FPURegister fd,
                                FPURegister fs,
                                FPURegister scratch) {
  Trunc_uw_d(fs, t8, scratch);
  mtc1(t8, fd);
}


void MacroAssembler::Trunc_uw_d(FPURegister fd,
                                Register rs,
                                FPURegister scratch) {
  ASSERT(!fd.is(scratch));
  ASSERT(!rs.is(at));

  // Load 2^31 into scratch as its float representation.
  li(at, 0x41E00000);
  mtc1(at, FPURegister::from_code(scratch.code() + 1));
  mtc1(zero_reg, scratch);
  // Test if scratch > fd.
  // If fd < 2^31 we can convert it normally.
  Label simple_convert;
  BranchF(&simple_convert, NULL, lt, fd, scratch);

  // First we subtract 2^31 from fd, then trunc it to rs
  // and add 2^31 to rs.
  sub_d(scratch, fd, scratch);
  trunc_w_d(scratch, scratch);
  mfc1(rs, scratch);
  Or(rs, rs, 1 << 31);

  Label done;
  Branch(&done);
  // Simple conversion.
  bind(&simple_convert);
  trunc_w_d(scratch, fd);
  mfc1(rs, scratch);

  bind(&done);
}

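// The unsigned truncation above handles values >= 2^31 (which do not fit a
// signed trunc_w_d) by computing, as a sketch:
//   result = trunc(fd - 2^31) | 0x80000000;
// and falls back to a plain trunc_w_d for smaller values.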

void MacroAssembler::BranchF(Label* target,
                             Label* nan,
                             Condition cc,
                             FPURegister cmp1,
                             FPURegister cmp2,
                             BranchDelaySlot bd) {
  if (cc == al) {
    Branch(bd, target);
    return;
  }

  ASSERT(nan || target);
  // Check for unordered (NaN) cases.
  if (nan) {
    c(UN, D, cmp1, cmp2);
    bc1t(nan);
  }

  if (target) {
    // Here NaN cases were either handled by this function or are assumed to
    // have been handled by the caller.
    // Unsigned conditions are treated as their signed counterpart.
    switch (cc) {
      case Uless:
      case less:
        c(OLT, D, cmp1, cmp2);
        bc1t(target);
        break;
      case Ugreater:
      case greater:
        c(ULE, D, cmp1, cmp2);
        bc1f(target);
        break;
      case Ugreater_equal:
      case greater_equal:
        c(ULT, D, cmp1, cmp2);
        bc1f(target);
        break;
      case Uless_equal:
      case less_equal:
        c(OLE, D, cmp1, cmp2);
        bc1t(target);
        break;
      case eq:
        c(EQ, D, cmp1, cmp2);
        bc1t(target);
        break;
      case ne:
        c(EQ, D, cmp1, cmp2);
        bc1f(target);
        break;
      default:
        CHECK(0);
    };
  }

  if (bd == PROTECT) {
    nop();
  }
}

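// Note on BranchF's condition mapping: the MIPS FPU compare has no
// greater-than predicate, so 'greater' and 'greater_equal' are emitted as the
// inverted compares c(ULE)/c(ULT) followed by bc1f, which branch only when the
// operands are ordered and the requested relation holds.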

void MacroAssembler::Move(FPURegister dst, double imm) {
  ASSERT(CpuFeatures::IsEnabled(FPU));
  static const DoubleRepresentation minus_zero(-0.0);
  static const DoubleRepresentation zero(0.0);
  DoubleRepresentation value(imm);
  // Handle special values first.
  bool force_load = dst.is(kDoubleRegZero);
  if (value.bits == zero.bits && !force_load) {
    mov_d(dst, kDoubleRegZero);
  } else if (value.bits == minus_zero.bits && !force_load) {
    neg_d(dst, kDoubleRegZero);
  } else {
    uint32_t lo, hi;
    DoubleAsTwoUInt32(imm, &lo, &hi);
    // Move the low part of the double into the lower of the corresponding
    // FPU register of the FPU register pair.
    if (lo != 0) {
      li(at, Operand(lo));
      mtc1(at, dst);
    } else {
      mtc1(zero_reg, dst);
    }
    // Move the high part of the double into the higher of the corresponding
    // FPU register of the FPU register pair.
    if (hi != 0) {
      li(at, Operand(hi));
      mtc1(at, dst.high());
    } else {
      mtc1(zero_reg, dst.high());
    }
  }
}


// Tries to get a signed int32 out of a double precision floating point heap
// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
// 32bits signed integer range.
// This method implementation differs from the ARM version for performance
// reasons.
void MacroAssembler::ConvertToInt32(Register source,
                                    Register dest,
                                    Register scratch,
                                    Register scratch2,
                                    FPURegister double_scratch,
                                    Label *not_int32) {
  Label right_exponent, done;
  // Get exponent word (ENDIAN issues).
  lw(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
  // Get exponent alone in scratch2.
  And(scratch2, scratch, Operand(HeapNumber::kExponentMask));
  // Load dest with zero. We use this either for the final shift or
  // for the answer.
  mov(dest, zero_reg);
  // Check whether the exponent matches a 32 bit signed int that is not a Smi.
  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
  // the exponent that we are fastest at and also the highest exponent we can
  // handle here.
  const uint32_t non_smi_exponent =
      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
  // If we have a match of the int32-but-not-Smi exponent then skip some logic.
  Branch(&right_exponent, eq, scratch2, Operand(non_smi_exponent));
  // If the exponent is higher than that then go to not_int32 case. This
  // catches numbers that don't fit in a signed int32, infinities and NaNs.
  Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));

  // We know the exponent is smaller than 30 (biased). If it is less than
  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
  // it rounds to zero.
  const uint32_t zero_exponent =
      (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
  Subu(scratch2, scratch2, Operand(zero_exponent));
  // Dest already has a Smi zero.
  Branch(&done, lt, scratch2, Operand(zero_reg));
  if (!CpuFeatures::IsSupported(FPU)) {
    // We have a shifted exponent between 0 and 30 in scratch2.
    srl(dest, scratch2, HeapNumber::kExponentShift);
    // We now have the exponent in dest. Subtract from 30 to get
    // how much to shift down.
    li(at, Operand(30));
    subu(dest, at, dest);
  }
  bind(&right_exponent);
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // MIPS FPU instructions implementing double precision to integer
    // conversion using round to zero. Since the FP value was qualified
    // above, the resulting integer should be a legal int32.
    // The original 'Exponent' word is still in scratch.
    lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
    mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
    trunc_w_d(double_scratch, double_scratch);
    mfc1(dest, double_scratch);
  } else {
    // On entry, dest has final downshift, scratch has original sign/exp/mant.
    // Save sign bit in top bit of dest.
    And(scratch2, scratch, Operand(0x80000000));
    Or(dest, dest, Operand(scratch2));
    // Put back the implicit 1, just above mantissa field.
    Or(scratch, scratch, Operand(1 << HeapNumber::kExponentShift));

    // Shift up the mantissa bits to take up the space the exponent used to
    // take. We just orred in the implicit bit so that took care of one and
    // we want to leave the sign bit 0 so we subtract 2 bits from the shift
    // distance. But we want to clear the sign-bit so shift one more bit
    // left, then shift right one bit.
    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
    sll(scratch, scratch, shift_distance + 1);
    srl(scratch, scratch, 1);

    // Get the second half of the double. For some exponents we don't
    // actually need this because the bits get shifted out again, but
    // it's probably slower to test than just to do it.
    lw(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
    // Extract the top 10 bits, and insert those bottom 10 bits of scratch.
    // The width of the field here is the same as the shift amount above.
    const int field_width = shift_distance;
    Ext(scratch2, scratch2, 32 - shift_distance, field_width);
    Ins(scratch, scratch2, 0, field_width);
    // Move down according to the exponent.
    srlv(scratch, scratch, dest);
    // Prepare the negative version of our integer.
    subu(scratch2, zero_reg, scratch);
    // Trick to check sign bit (msb) held in dest, count leading zero.
    // 0 indicates negative, save negative version with conditional move.
    clz(dest, dest);
    movz(scratch, scratch2, dest);
    mov(dest, scratch);
  }
  bind(&done);
}


void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
                                     FPURegister result,
                                     DoubleRegister double_input,
                                     Register scratch1,
                                     Register except_flag,
                                     CheckForInexactConversion check_inexact) {
  ASSERT(CpuFeatures::IsSupported(FPU));
  CpuFeatures::Scope scope(FPU);

  int32_t except_mask = kFCSRFlagMask;  // Assume interested in all exceptions.

  if (check_inexact == kDontCheckForInexactConversion) {
1259 // Ingore inexact exceptions.
    except_mask &= ~kFCSRInexactFlagMask;
  }

  // Save FCSR.
  cfc1(scratch1, FCSR);
  // Disable FPU exceptions.
  ctc1(zero_reg, FCSR);

  // Do operation based on rounding mode.
  switch (rounding_mode) {
    case kRoundToNearest:
      round_w_d(result, double_input);
      break;
    case kRoundToZero:
      trunc_w_d(result, double_input);
      break;
    case kRoundToPlusInf:
      ceil_w_d(result, double_input);
      break;
    case kRoundToMinusInf:
      floor_w_d(result, double_input);
      break;
  }  // End of switch-statement.

  // Retrieve FCSR.
  cfc1(except_flag, FCSR);
  // Restore FCSR.
  ctc1(scratch1, FCSR);

  // Check for fpu exceptions.
  And(except_flag, except_flag, Operand(except_mask));
}

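// Typical use of EmitFPUTruncate, as a sketch (register names here are
// illustrative, not fixed by this file):
//   EmitFPUTruncate(kRoundToZero, single_scratch, double_input,
//                   scratch1, except_flag, kDontCheckForInexactConversion);
//   // Any bit left in except_flag means the value did not convert cleanly.
//   Branch(&slow_path, ne, except_flag, Operand(zero_reg));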

void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
                                                 Register input_high,
                                                 Register input_low,
                                                 Register scratch) {
  Label done, normal_exponent, restore_sign;
  // Extract the biased exponent in result.
  Ext(result,
      input_high,
      HeapNumber::kExponentShift,
      HeapNumber::kExponentBits);

  // Check for Infinity and NaNs, which should return 0.
  Subu(scratch, result, HeapNumber::kExponentMask);
  movz(result, zero_reg, scratch);
  Branch(&done, eq, scratch, Operand(zero_reg));

  // Express exponent as delta to (number of mantissa bits + 31).
  Subu(result,
       result,
       Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));

  // If the delta is strictly positive, all bits would be shifted away,
  // which means that we can return 0.
  Branch(&normal_exponent, le, result, Operand(zero_reg));
  mov(result, zero_reg);
  Branch(&done);

  bind(&normal_exponent);
  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
  // Calculate shift.
  Addu(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits));

  // Save the sign.
  Register sign = result;
  result = no_reg;
  And(sign, input_high, Operand(HeapNumber::kSignMask));

  // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
  // to check for this specific case.
  Label high_shift_needed, high_shift_done;
  Branch(&high_shift_needed, lt, scratch, Operand(32));
  mov(input_high, zero_reg);
  Branch(&high_shift_done);
  bind(&high_shift_needed);

  // Set the implicit 1 before the mantissa part in input_high.
  Or(input_high,
     input_high,
     Operand(1 << HeapNumber::kMantissaBitsInTopWord));
  // Shift the mantissa bits to the correct position.
  // We don't need to clear non-mantissa bits as they will be shifted away.
  // If they weren't, it would mean that the answer is in the 32bit range.
  sllv(input_high, input_high, scratch);

  bind(&high_shift_done);

  // Replace the shifted bits with bits from the lower mantissa word.
  Label pos_shift, shift_done;
  li(at, 32);
  subu(scratch, at, scratch);
  Branch(&pos_shift, ge, scratch, Operand(zero_reg));

  // Negate scratch.
  Subu(scratch, zero_reg, scratch);
  sllv(input_low, input_low, scratch);
  Branch(&shift_done);

  bind(&pos_shift);
  srlv(input_low, input_low, scratch);

  bind(&shift_done);
  Or(input_high, input_high, Operand(input_low));
  // Restore sign if necessary.
  mov(scratch, sign);
  result = sign;
  sign = no_reg;
  Subu(result, zero_reg, input_high);
  movz(result, input_high, scratch);
  bind(&done);
}


void MacroAssembler::EmitECMATruncate(Register result,
                                      FPURegister double_input,
                                      FPURegister single_scratch,
                                      Register scratch,
                                      Register scratch2,
                                      Register scratch3) {
  CpuFeatures::Scope scope(FPU);
  ASSERT(!scratch2.is(result));
  ASSERT(!scratch3.is(result));
  ASSERT(!scratch3.is(scratch2));
  ASSERT(!scratch.is(result) &&
         !scratch.is(scratch2) &&
         !scratch.is(scratch3));
  ASSERT(!single_scratch.is(double_input));

  Label done;
  Label manual;

  // Clear cumulative exception flags and save the FCSR.
  cfc1(scratch2, FCSR);
  ctc1(zero_reg, FCSR);
  // Try a conversion to a signed integer.
  trunc_w_d(single_scratch, double_input);
  mfc1(result, single_scratch);
  // Retrieve and restore the FCSR.
  cfc1(scratch, FCSR);
  ctc1(scratch2, FCSR);
  // Check for overflow and NaNs.
  And(scratch,
      scratch,
      kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
  // If we had no exceptions we are done.
  Branch(&done, eq, scratch, Operand(zero_reg));

  // Load the double value and perform a manual truncation.
  Register input_high = scratch2;
  Register input_low = scratch3;
  Move(input_low, input_high, double_input);
  EmitOutOfInt32RangeTruncate(result,
                              input_high,
                              input_low,
                              scratch);
  bind(&done);
}

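// EmitECMATruncate implements ECMA-262 ToInt32-style truncation: a value
// outside the signed 32-bit range is not clamped but reduced modulo 2^32
// (the low 32 bits of its integer part), which is why an FCSR exception above
// falls through to the manual EmitOutOfInt32RangeTruncate path instead of
// bailing out.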

void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                         Register src,
                                         int num_least_bits) {
  Ext(dst, src, kSmiTagSize, num_least_bits);
}


void MacroAssembler::GetLeastBitsFromInt32(Register dst,
                                           Register src,
                                           int num_least_bits) {
  And(dst, src, Operand((1 << num_least_bits) - 1));
}


// Emulated conditional branches do not emit a nop in the branch delay slot.
//
// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
    (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
    (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))


void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
  BranchShort(offset, bdslot);
}


void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
                            const Operand& rt,
                            BranchDelaySlot bdslot) {
  BranchShort(offset, cond, rs, rt, bdslot);
}


void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
  if (L->is_bound()) {
    if (is_near(L)) {
      BranchShort(L, bdslot);
    } else {
      Jr(L, bdslot);
    }
  } else {
    if (is_trampoline_emitted()) {
      Jr(L, bdslot);
    } else {
      BranchShort(L, bdslot);
    }
  }
}


void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
                            const Operand& rt,
                            BranchDelaySlot bdslot) {
  if (L->is_bound()) {
    if (is_near(L)) {
      BranchShort(L, cond, rs, rt, bdslot);
    } else {
      Label skip;
      Condition neg_cond = NegateCondition(cond);
      BranchShort(&skip, neg_cond, rs, rt);
      Jr(L, bdslot);
      bind(&skip);
    }
  } else {
    if (is_trampoline_emitted()) {
      Label skip;
      Condition neg_cond = NegateCondition(cond);
      BranchShort(&skip, neg_cond, rs, rt);
      Jr(L, bdslot);
      bind(&skip);
    } else {
      BranchShort(L, cond, rs, rt, bdslot);
    }
  }
}


void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
  b(offset);

  // Emit a nop in the branch delay slot if required.
  if (bdslot == PROTECT)
    nop();
}


void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
                                 const Operand& rt,
                                 BranchDelaySlot bdslot) {
  BRANCH_ARGS_CHECK(cond, rs, rt);
  ASSERT(!rs.is(zero_reg));
  Register r2 = no_reg;
  Register scratch = at;

  if (rt.is_reg()) {
    // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
    // rt.
    r2 = rt.rm_;
    switch (cond) {
      case cc_always:
        b(offset);
        break;
      case eq:
        beq(rs, r2, offset);
        break;
      case ne:
        bne(rs, r2, offset);
        break;
      // Signed comparison.
      case greater:
        if (r2.is(zero_reg)) {
          bgtz(rs, offset);
        } else {
          slt(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case greater_equal:
        if (r2.is(zero_reg)) {
          bgez(rs, offset);
        } else {
          slt(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case less:
        if (r2.is(zero_reg)) {
          bltz(rs, offset);
        } else {
          slt(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case less_equal:
        if (r2.is(zero_reg)) {
          blez(rs, offset);
        } else {
          slt(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      // Unsigned comparison.
      case Ugreater:
        if (r2.is(zero_reg)) {
          bgtz(rs, offset);
        } else {
          sltu(scratch, r2, rs);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Ugreater_equal:
        if (r2.is(zero_reg)) {
          bgez(rs, offset);
        } else {
          sltu(scratch, rs, r2);
          beq(scratch, zero_reg, offset);
        }
        break;
      case Uless:
        if (r2.is(zero_reg)) {
          // No code needs to be emitted.
          return;
        } else {
          sltu(scratch, rs, r2);
          bne(scratch, zero_reg, offset);
        }
        break;
      case Uless_equal:
        if (r2.is(zero_reg)) {
          b(offset);
        } else {
          sltu(scratch, r2, rs);
          beq(scratch, zero_reg, offset);
        }
        break;
      default:
        UNREACHABLE();
    }
  } else {
1602 // Be careful to always use shifted_branch_offset only just before the
 1603 // branch instruction, as the location will be remembered for patching the
1604 // target.
1605 switch (cond) {
1606 case cc_always:
1607 b(offset);
1608 break;
1609 case eq:
1610 // We don't want any other register but scratch clobbered.
1611 ASSERT(!scratch.is(rs));
1612 r2 = scratch;
1613 li(r2, rt);
1614 beq(rs, r2, offset);
1615 break;
1616 case ne:
1617 // We don't want any other register but scratch clobbered.
1618 ASSERT(!scratch.is(rs));
1619 r2 = scratch;
1620 li(r2, rt);
1621 bne(rs, r2, offset);
1622 break;
Ben Murdoch257744e2011-11-30 15:57:28 +00001623 // Signed comparison.
Steve Block44f0eee2011-05-26 01:26:41 +01001624 case greater:
1625 if (rt.imm32_ == 0) {
1626 bgtz(rs, offset);
1627 } else {
1628 r2 = scratch;
1629 li(r2, rt);
1630 slt(scratch, r2, rs);
1631 bne(scratch, zero_reg, offset);
1632 }
1633 break;
1634 case greater_equal:
1635 if (rt.imm32_ == 0) {
1636 bgez(rs, offset);
1637 } else if (is_int16(rt.imm32_)) {
1638 slti(scratch, rs, rt.imm32_);
1639 beq(scratch, zero_reg, offset);
1640 } else {
1641 r2 = scratch;
1642 li(r2, rt);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001643 slt(scratch, rs, r2);
Steve Block44f0eee2011-05-26 01:26:41 +01001644 beq(scratch, zero_reg, offset);
1645 }
1646 break;
1647 case less:
1648 if (rt.imm32_ == 0) {
1649 bltz(rs, offset);
1650 } else if (is_int16(rt.imm32_)) {
1651 slti(scratch, rs, rt.imm32_);
1652 bne(scratch, zero_reg, offset);
1653 } else {
1654 r2 = scratch;
1655 li(r2, rt);
1656 slt(scratch, rs, r2);
1657 bne(scratch, zero_reg, offset);
1658 }
1659 break;
1660 case less_equal:
1661 if (rt.imm32_ == 0) {
1662 blez(rs, offset);
1663 } else {
1664 r2 = scratch;
1665 li(r2, rt);
1666 slt(scratch, r2, rs);
1667 beq(scratch, zero_reg, offset);
1668 }
1669 break;
1670 // Unsigned comparison.
1671 case Ugreater:
1672 if (rt.imm32_ == 0) {
1673 bgtz(rs, offset);
1674 } else {
1675 r2 = scratch;
1676 li(r2, rt);
1677 sltu(scratch, r2, rs);
1678 bne(scratch, zero_reg, offset);
1679 }
1680 break;
1681 case Ugreater_equal:
1682 if (rt.imm32_ == 0) {
1683 bgez(rs, offset);
1684 } else if (is_int16(rt.imm32_)) {
1685 sltiu(scratch, rs, rt.imm32_);
1686 beq(scratch, zero_reg, offset);
1687 } else {
1688 r2 = scratch;
1689 li(r2, rt);
1690 sltu(scratch, rs, r2);
1691 beq(scratch, zero_reg, offset);
1692 }
1693 break;
1694 case Uless:
1695 if (rt.imm32_ == 0) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001696 // No code needs to be emitted.
1697 return;
Steve Block44f0eee2011-05-26 01:26:41 +01001698 } else if (is_int16(rt.imm32_)) {
1699 sltiu(scratch, rs, rt.imm32_);
1700 bne(scratch, zero_reg, offset);
1701 } else {
1702 r2 = scratch;
1703 li(r2, rt);
1704 sltu(scratch, rs, r2);
1705 bne(scratch, zero_reg, offset);
1706 }
1707 break;
1708 case Uless_equal:
1709 if (rt.imm32_ == 0) {
1710 b(offset);
1711 } else {
1712 r2 = scratch;
1713 li(r2, rt);
1714 sltu(scratch, r2, rs);
1715 beq(scratch, zero_reg, offset);
1716 }
1717 break;
1718 default:
1719 UNREACHABLE();
1720 }
Andrei Popescu31002712010-02-23 13:46:05 +00001721 }
Steve Block44f0eee2011-05-26 01:26:41 +01001722 // Emit a nop in the branch delay slot if required.
1723 if (bdslot == PROTECT)
1724 nop();
Andrei Popescu31002712010-02-23 13:46:05 +00001725}
1726
1727
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001728void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
Andrei Popescu31002712010-02-23 13:46:05 +00001729 // We pass shifted_branch_offset as the argument to the branch instruction so
 1730 // that it is evaluated just before the branch instruction is generated.
1731
Steve Block44f0eee2011-05-26 01:26:41 +01001732 b(shifted_branch_offset(L, false));
Andrei Popescu31002712010-02-23 13:46:05 +00001733
Steve Block44f0eee2011-05-26 01:26:41 +01001734 // Emit a nop in the branch delay slot if required.
1735 if (bdslot == PROTECT)
1736 nop();
Andrei Popescu31002712010-02-23 13:46:05 +00001737}
1738
1739
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001740void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
1741 const Operand& rt,
1742 BranchDelaySlot bdslot) {
Steve Block44f0eee2011-05-26 01:26:41 +01001743 BRANCH_ARGS_CHECK(cond, rs, rt);
1744
1745 int32_t offset;
1746 Register r2 = no_reg;
1747 Register scratch = at;
1748 if (rt.is_reg()) {
1749 r2 = rt.rm_;
1750 // Be careful to always use shifted_branch_offset only just before the
 1751 // branch instruction, as the location will be remembered for patching the
1752 // target.
1753 switch (cond) {
1754 case cc_always:
1755 offset = shifted_branch_offset(L, false);
1756 b(offset);
1757 break;
1758 case eq:
1759 offset = shifted_branch_offset(L, false);
1760 beq(rs, r2, offset);
1761 break;
1762 case ne:
1763 offset = shifted_branch_offset(L, false);
1764 bne(rs, r2, offset);
1765 break;
Ben Murdoch257744e2011-11-30 15:57:28 +00001766 // Signed comparison.
Steve Block44f0eee2011-05-26 01:26:41 +01001767 case greater:
1768 if (r2.is(zero_reg)) {
1769 offset = shifted_branch_offset(L, false);
1770 bgtz(rs, offset);
1771 } else {
1772 slt(scratch, r2, rs);
1773 offset = shifted_branch_offset(L, false);
1774 bne(scratch, zero_reg, offset);
1775 }
1776 break;
1777 case greater_equal:
1778 if (r2.is(zero_reg)) {
1779 offset = shifted_branch_offset(L, false);
1780 bgez(rs, offset);
1781 } else {
1782 slt(scratch, rs, r2);
1783 offset = shifted_branch_offset(L, false);
1784 beq(scratch, zero_reg, offset);
1785 }
1786 break;
1787 case less:
1788 if (r2.is(zero_reg)) {
1789 offset = shifted_branch_offset(L, false);
1790 bltz(rs, offset);
1791 } else {
1792 slt(scratch, rs, r2);
1793 offset = shifted_branch_offset(L, false);
1794 bne(scratch, zero_reg, offset);
1795 }
1796 break;
1797 case less_equal:
1798 if (r2.is(zero_reg)) {
1799 offset = shifted_branch_offset(L, false);
1800 blez(rs, offset);
1801 } else {
1802 slt(scratch, r2, rs);
1803 offset = shifted_branch_offset(L, false);
1804 beq(scratch, zero_reg, offset);
1805 }
1806 break;
1807 // Unsigned comparison.
1808 case Ugreater:
1809 if (r2.is(zero_reg)) {
1810 offset = shifted_branch_offset(L, false);
1811 bgtz(rs, offset);
1812 } else {
1813 sltu(scratch, r2, rs);
1814 offset = shifted_branch_offset(L, false);
1815 bne(scratch, zero_reg, offset);
1816 }
1817 break;
1818 case Ugreater_equal:
1819 if (r2.is(zero_reg)) {
1820 offset = shifted_branch_offset(L, false);
1821 bgez(rs, offset);
1822 } else {
1823 sltu(scratch, rs, r2);
1824 offset = shifted_branch_offset(L, false);
1825 beq(scratch, zero_reg, offset);
1826 }
1827 break;
1828 case Uless:
1829 if (r2.is(zero_reg)) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001830 // No code needs to be emitted.
1831 return;
Steve Block44f0eee2011-05-26 01:26:41 +01001832 } else {
1833 sltu(scratch, rs, r2);
1834 offset = shifted_branch_offset(L, false);
1835 bne(scratch, zero_reg, offset);
1836 }
1837 break;
1838 case Uless_equal:
1839 if (r2.is(zero_reg)) {
1840 offset = shifted_branch_offset(L, false);
1841 b(offset);
1842 } else {
1843 sltu(scratch, r2, rs);
1844 offset = shifted_branch_offset(L, false);
1845 beq(scratch, zero_reg, offset);
1846 }
1847 break;
1848 default:
1849 UNREACHABLE();
1850 }
1851 } else {
1852 // Be careful to always use shifted_branch_offset only just before the
 1853 // branch instruction, as the location will be remembered for patching the
1854 // target.
1855 switch (cond) {
1856 case cc_always:
1857 offset = shifted_branch_offset(L, false);
1858 b(offset);
1859 break;
1860 case eq:
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001861 ASSERT(!scratch.is(rs));
Steve Block44f0eee2011-05-26 01:26:41 +01001862 r2 = scratch;
1863 li(r2, rt);
1864 offset = shifted_branch_offset(L, false);
1865 beq(rs, r2, offset);
1866 break;
1867 case ne:
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001868 ASSERT(!scratch.is(rs));
Steve Block44f0eee2011-05-26 01:26:41 +01001869 r2 = scratch;
1870 li(r2, rt);
1871 offset = shifted_branch_offset(L, false);
1872 bne(rs, r2, offset);
1873 break;
Ben Murdoch257744e2011-11-30 15:57:28 +00001874 // Signed comparison.
Steve Block44f0eee2011-05-26 01:26:41 +01001875 case greater:
1876 if (rt.imm32_ == 0) {
1877 offset = shifted_branch_offset(L, false);
1878 bgtz(rs, offset);
1879 } else {
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001880 ASSERT(!scratch.is(rs));
Steve Block44f0eee2011-05-26 01:26:41 +01001881 r2 = scratch;
1882 li(r2, rt);
1883 slt(scratch, r2, rs);
1884 offset = shifted_branch_offset(L, false);
1885 bne(scratch, zero_reg, offset);
1886 }
1887 break;
1888 case greater_equal:
1889 if (rt.imm32_ == 0) {
1890 offset = shifted_branch_offset(L, false);
1891 bgez(rs, offset);
1892 } else if (is_int16(rt.imm32_)) {
1893 slti(scratch, rs, rt.imm32_);
1894 offset = shifted_branch_offset(L, false);
1895 beq(scratch, zero_reg, offset);
1896 } else {
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001897 ASSERT(!scratch.is(rs));
Steve Block44f0eee2011-05-26 01:26:41 +01001898 r2 = scratch;
1899 li(r2, rt);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001900 slt(scratch, rs, r2);
Steve Block44f0eee2011-05-26 01:26:41 +01001901 offset = shifted_branch_offset(L, false);
1902 beq(scratch, zero_reg, offset);
1903 }
1904 break;
1905 case less:
1906 if (rt.imm32_ == 0) {
1907 offset = shifted_branch_offset(L, false);
1908 bltz(rs, offset);
1909 } else if (is_int16(rt.imm32_)) {
1910 slti(scratch, rs, rt.imm32_);
1911 offset = shifted_branch_offset(L, false);
1912 bne(scratch, zero_reg, offset);
1913 } else {
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001914 ASSERT(!scratch.is(rs));
Steve Block44f0eee2011-05-26 01:26:41 +01001915 r2 = scratch;
1916 li(r2, rt);
1917 slt(scratch, rs, r2);
1918 offset = shifted_branch_offset(L, false);
1919 bne(scratch, zero_reg, offset);
1920 }
1921 break;
1922 case less_equal:
1923 if (rt.imm32_ == 0) {
1924 offset = shifted_branch_offset(L, false);
1925 blez(rs, offset);
1926 } else {
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001927 ASSERT(!scratch.is(rs));
Steve Block44f0eee2011-05-26 01:26:41 +01001928 r2 = scratch;
1929 li(r2, rt);
1930 slt(scratch, r2, rs);
1931 offset = shifted_branch_offset(L, false);
1932 beq(scratch, zero_reg, offset);
1933 }
1934 break;
1935 // Unsigned comparison.
1936 case Ugreater:
1937 if (rt.imm32_ == 0) {
1938 offset = shifted_branch_offset(L, false);
1939 bgtz(rs, offset);
1940 } else {
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001941 ASSERT(!scratch.is(rs));
Steve Block44f0eee2011-05-26 01:26:41 +01001942 r2 = scratch;
1943 li(r2, rt);
1944 sltu(scratch, r2, rs);
1945 offset = shifted_branch_offset(L, false);
1946 bne(scratch, zero_reg, offset);
1947 }
1948 break;
1949 case Ugreater_equal:
1950 if (rt.imm32_ == 0) {
1951 offset = shifted_branch_offset(L, false);
1952 bgez(rs, offset);
1953 } else if (is_int16(rt.imm32_)) {
1954 sltiu(scratch, rs, rt.imm32_);
1955 offset = shifted_branch_offset(L, false);
1956 beq(scratch, zero_reg, offset);
1957 } else {
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001958 ASSERT(!scratch.is(rs));
Steve Block44f0eee2011-05-26 01:26:41 +01001959 r2 = scratch;
1960 li(r2, rt);
1961 sltu(scratch, rs, r2);
1962 offset = shifted_branch_offset(L, false);
1963 beq(scratch, zero_reg, offset);
1964 }
1965 break;
1966 case Uless:
1967 if (rt.imm32_ == 0) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001968 // No code needs to be emitted.
1969 return;
Steve Block44f0eee2011-05-26 01:26:41 +01001970 } else if (is_int16(rt.imm32_)) {
1971 sltiu(scratch, rs, rt.imm32_);
1972 offset = shifted_branch_offset(L, false);
1973 bne(scratch, zero_reg, offset);
1974 } else {
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001975 ASSERT(!scratch.is(rs));
Steve Block44f0eee2011-05-26 01:26:41 +01001976 r2 = scratch;
1977 li(r2, rt);
1978 sltu(scratch, rs, r2);
1979 offset = shifted_branch_offset(L, false);
1980 bne(scratch, zero_reg, offset);
1981 }
1982 break;
1983 case Uless_equal:
1984 if (rt.imm32_ == 0) {
1985 offset = shifted_branch_offset(L, false);
1986 b(offset);
1987 } else {
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001988 ASSERT(!scratch.is(rs));
Steve Block44f0eee2011-05-26 01:26:41 +01001989 r2 = scratch;
1990 li(r2, rt);
1991 sltu(scratch, r2, rs);
1992 offset = shifted_branch_offset(L, false);
1993 beq(scratch, zero_reg, offset);
1994 }
1995 break;
1996 default:
1997 UNREACHABLE();
1998 }
1999 }
 2000 // Check that the offset actually fits in an int16_t.
2001 ASSERT(is_int16(offset));
2002 // Emit a nop in the branch delay slot if required.
2003 if (bdslot == PROTECT)
2004 nop();
2005}
2006
2007
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002008void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
2009 BranchAndLinkShort(offset, bdslot);
2010}
2011
2012
2013void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
2014 const Operand& rt,
2015 BranchDelaySlot bdslot) {
2016 BranchAndLinkShort(offset, cond, rs, rt, bdslot);
2017}
2018
2019
2020void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002021 if (L->is_bound()) {
2022 if (is_near(L)) {
2023 BranchAndLinkShort(L, bdslot);
2024 } else {
2025 Jalr(L, bdslot);
2026 }
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002027 } else {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002028 if (is_trampoline_emitted()) {
2029 Jalr(L, bdslot);
2030 } else {
2031 BranchAndLinkShort(L, bdslot);
2032 }
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002033 }
2034}
2035
2036
2037void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
2038 const Operand& rt,
2039 BranchDelaySlot bdslot) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002040 if (L->is_bound()) {
2041 if (is_near(L)) {
2042 BranchAndLinkShort(L, cond, rs, rt, bdslot);
2043 } else {
2044 Label skip;
2045 Condition neg_cond = NegateCondition(cond);
2046 BranchShort(&skip, neg_cond, rs, rt);
2047 Jalr(L, bdslot);
2048 bind(&skip);
2049 }
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002050 } else {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002051 if (is_trampoline_emitted()) {
2052 Label skip;
2053 Condition neg_cond = NegateCondition(cond);
2054 BranchShort(&skip, neg_cond, rs, rt);
2055 Jalr(L, bdslot);
2056 bind(&skip);
2057 } else {
2058 BranchAndLinkShort(L, cond, rs, rt, bdslot);
2059 }
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002060 }
2061}
2062
2063
Andrei Popescu31002712010-02-23 13:46:05 +00002064// We need to use a bgezal or bltzal, but they can't be used directly with the
2065// slt instructions. We could use sub or add instead but we would miss overflow
2066// cases, so we keep slt and add an intermediate third instruction.
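// For example, the 'greater' case below emits the sequence:
//   slt(scratch, r2, rs)         // scratch = 1 if rs > r2, else 0.
//   addiu(scratch, scratch, -1)  // scratch = 0 if the branch is taken, else -1.
//   bgezal(scratch, offset)      // Branch-and-link when scratch >= 0.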
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002067void MacroAssembler::BranchAndLinkShort(int16_t offset,
2068 BranchDelaySlot bdslot) {
Steve Block44f0eee2011-05-26 01:26:41 +01002069 bal(offset);
Andrei Popescu31002712010-02-23 13:46:05 +00002070
Steve Block44f0eee2011-05-26 01:26:41 +01002071 // Emit a nop in the branch delay slot if required.
2072 if (bdslot == PROTECT)
2073 nop();
Andrei Popescu31002712010-02-23 13:46:05 +00002074}
2075
2076
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002077void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
2078 Register rs, const Operand& rt,
2079 BranchDelaySlot bdslot) {
Steve Block44f0eee2011-05-26 01:26:41 +01002080 BRANCH_ARGS_CHECK(cond, rs, rt);
Steve Block6ded16b2010-05-10 14:33:55 +01002081 Register r2 = no_reg;
Steve Block44f0eee2011-05-26 01:26:41 +01002082 Register scratch = at;
2083
Andrei Popescu31002712010-02-23 13:46:05 +00002084 if (rt.is_reg()) {
2085 r2 = rt.rm_;
2086 } else if (cond != cc_always) {
2087 r2 = scratch;
2088 li(r2, rt);
2089 }
2090
2091 switch (cond) {
2092 case cc_always:
Steve Block44f0eee2011-05-26 01:26:41 +01002093 bal(offset);
Andrei Popescu31002712010-02-23 13:46:05 +00002094 break;
2095 case eq:
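      // There is no conditional branch-and-link instruction, so branch over the
      // bal below when the condition does not hold.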
2096 bne(rs, r2, 2);
2097 nop();
Steve Block44f0eee2011-05-26 01:26:41 +01002098 bal(offset);
Andrei Popescu31002712010-02-23 13:46:05 +00002099 break;
2100 case ne:
2101 beq(rs, r2, 2);
2102 nop();
Steve Block44f0eee2011-05-26 01:26:41 +01002103 bal(offset);
Andrei Popescu31002712010-02-23 13:46:05 +00002104 break;
2105
Ben Murdoch257744e2011-11-30 15:57:28 +00002106 // Signed comparison.
Andrei Popescu31002712010-02-23 13:46:05 +00002107 case greater:
2108 slt(scratch, r2, rs);
2109 addiu(scratch, scratch, -1);
Steve Block44f0eee2011-05-26 01:26:41 +01002110 bgezal(scratch, offset);
Andrei Popescu31002712010-02-23 13:46:05 +00002111 break;
2112 case greater_equal:
2113 slt(scratch, rs, r2);
2114 addiu(scratch, scratch, -1);
Steve Block44f0eee2011-05-26 01:26:41 +01002115 bltzal(scratch, offset);
Andrei Popescu31002712010-02-23 13:46:05 +00002116 break;
2117 case less:
2118 slt(scratch, rs, r2);
2119 addiu(scratch, scratch, -1);
Steve Block44f0eee2011-05-26 01:26:41 +01002120 bgezal(scratch, offset);
Andrei Popescu31002712010-02-23 13:46:05 +00002121 break;
2122 case less_equal:
2123 slt(scratch, r2, rs);
2124 addiu(scratch, scratch, -1);
Steve Block44f0eee2011-05-26 01:26:41 +01002125 bltzal(scratch, offset);
Andrei Popescu31002712010-02-23 13:46:05 +00002126 break;
2127
2128 // Unsigned comparison.
2129 case Ugreater:
2130 sltu(scratch, r2, rs);
2131 addiu(scratch, scratch, -1);
Steve Block44f0eee2011-05-26 01:26:41 +01002132 bgezal(scratch, offset);
Andrei Popescu31002712010-02-23 13:46:05 +00002133 break;
2134 case Ugreater_equal:
2135 sltu(scratch, rs, r2);
2136 addiu(scratch, scratch, -1);
Steve Block44f0eee2011-05-26 01:26:41 +01002137 bltzal(scratch, offset);
Andrei Popescu31002712010-02-23 13:46:05 +00002138 break;
2139 case Uless:
2140 sltu(scratch, rs, r2);
2141 addiu(scratch, scratch, -1);
Steve Block44f0eee2011-05-26 01:26:41 +01002142 bgezal(scratch, offset);
Andrei Popescu31002712010-02-23 13:46:05 +00002143 break;
2144 case Uless_equal:
2145 sltu(scratch, r2, rs);
2146 addiu(scratch, scratch, -1);
Steve Block44f0eee2011-05-26 01:26:41 +01002147 bltzal(scratch, offset);
Andrei Popescu31002712010-02-23 13:46:05 +00002148 break;
2149
2150 default:
2151 UNREACHABLE();
2152 }
Steve Block44f0eee2011-05-26 01:26:41 +01002153 // Emit a nop in the branch delay slot if required.
2154 if (bdslot == PROTECT)
2155 nop();
2156}
2157
2158
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002159void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
Steve Block44f0eee2011-05-26 01:26:41 +01002160 bal(shifted_branch_offset(L, false));
2161
2162 // Emit a nop in the branch delay slot if required.
2163 if (bdslot == PROTECT)
2164 nop();
2165}
2166
2167
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002168void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
2169 const Operand& rt,
2170 BranchDelaySlot bdslot) {
Steve Block44f0eee2011-05-26 01:26:41 +01002171 BRANCH_ARGS_CHECK(cond, rs, rt);
2172
2173 int32_t offset;
2174 Register r2 = no_reg;
2175 Register scratch = at;
2176 if (rt.is_reg()) {
2177 r2 = rt.rm_;
2178 } else if (cond != cc_always) {
2179 r2 = scratch;
2180 li(r2, rt);
2181 }
2182
2183 switch (cond) {
2184 case cc_always:
2185 offset = shifted_branch_offset(L, false);
2186 bal(offset);
2187 break;
2188 case eq:
2189 bne(rs, r2, 2);
2190 nop();
2191 offset = shifted_branch_offset(L, false);
2192 bal(offset);
2193 break;
2194 case ne:
2195 beq(rs, r2, 2);
2196 nop();
2197 offset = shifted_branch_offset(L, false);
2198 bal(offset);
2199 break;
2200
Ben Murdoch257744e2011-11-30 15:57:28 +00002201 // Signed comparison.
Steve Block44f0eee2011-05-26 01:26:41 +01002202 case greater:
2203 slt(scratch, r2, rs);
2204 addiu(scratch, scratch, -1);
2205 offset = shifted_branch_offset(L, false);
2206 bgezal(scratch, offset);
2207 break;
2208 case greater_equal:
2209 slt(scratch, rs, r2);
2210 addiu(scratch, scratch, -1);
2211 offset = shifted_branch_offset(L, false);
2212 bltzal(scratch, offset);
2213 break;
2214 case less:
2215 slt(scratch, rs, r2);
2216 addiu(scratch, scratch, -1);
2217 offset = shifted_branch_offset(L, false);
2218 bgezal(scratch, offset);
2219 break;
2220 case less_equal:
2221 slt(scratch, r2, rs);
2222 addiu(scratch, scratch, -1);
2223 offset = shifted_branch_offset(L, false);
2224 bltzal(scratch, offset);
2225 break;
2226
2227 // Unsigned comparison.
2228 case Ugreater:
2229 sltu(scratch, r2, rs);
2230 addiu(scratch, scratch, -1);
2231 offset = shifted_branch_offset(L, false);
2232 bgezal(scratch, offset);
2233 break;
2234 case Ugreater_equal:
2235 sltu(scratch, rs, r2);
2236 addiu(scratch, scratch, -1);
2237 offset = shifted_branch_offset(L, false);
2238 bltzal(scratch, offset);
2239 break;
2240 case Uless:
2241 sltu(scratch, rs, r2);
2242 addiu(scratch, scratch, -1);
2243 offset = shifted_branch_offset(L, false);
2244 bgezal(scratch, offset);
2245 break;
2246 case Uless_equal:
2247 sltu(scratch, r2, rs);
2248 addiu(scratch, scratch, -1);
2249 offset = shifted_branch_offset(L, false);
2250 bltzal(scratch, offset);
2251 break;
2252
2253 default:
2254 UNREACHABLE();
2255 }
2256
 2257 // Check that the offset actually fits in an int16_t.
2258 ASSERT(is_int16(offset));
2259
2260 // Emit a nop in the branch delay slot if required.
2261 if (bdslot == PROTECT)
2262 nop();
2263}
2264
2265
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002266void MacroAssembler::Jump(Register target,
Steve Block44f0eee2011-05-26 01:26:41 +01002267 Condition cond,
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002268 Register rs,
2269 const Operand& rt,
2270 BranchDelaySlot bd) {
2271 BlockTrampolinePoolScope block_trampoline_pool(this);
2272 if (cond == cc_always) {
2273 jr(target);
2274 } else {
2275 BRANCH_ARGS_CHECK(cond, rs, rt);
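    // Branch over the jr below when the condition does not hold, so the jump is
    // only taken when 'cond' is satisfied.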
2276 Branch(2, NegateCondition(cond), rs, rt);
2277 jr(target);
2278 }
2279 // Emit a nop in the branch delay slot if required.
2280 if (bd == PROTECT)
2281 nop();
2282}
2283
2284
2285void MacroAssembler::Jump(intptr_t target,
2286 RelocInfo::Mode rmode,
2287 Condition cond,
2288 Register rs,
2289 const Operand& rt,
2290 BranchDelaySlot bd) {
2291 li(t9, Operand(target, rmode));
2292 Jump(t9, cond, rs, rt, bd);
2293}
2294
2295
2296void MacroAssembler::Jump(Address target,
2297 RelocInfo::Mode rmode,
2298 Condition cond,
2299 Register rs,
2300 const Operand& rt,
2301 BranchDelaySlot bd) {
2302 ASSERT(!RelocInfo::IsCodeTarget(rmode));
2303 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
2304}
2305
2306
2307void MacroAssembler::Jump(Handle<Code> code,
2308 RelocInfo::Mode rmode,
2309 Condition cond,
2310 Register rs,
2311 const Operand& rt,
2312 BranchDelaySlot bd) {
2313 ASSERT(RelocInfo::IsCodeTarget(rmode));
2314 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
2315}
2316
2317
2318int MacroAssembler::CallSize(Register target,
2319 Condition cond,
2320 Register rs,
2321 const Operand& rt,
2322 BranchDelaySlot bd) {
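  // Mirror the sequence emitted by Call(Register): a single jalr for an
  // unconditional call; a branch-over, its delay-slot nop and the jalr for a
  // conditional one; plus one trailing nop when the delay slot is PROTECTed.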
2323 int size = 0;
2324
2325 if (cond == cc_always) {
2326 size += 1;
2327 } else {
2328 size += 3;
Steve Block44f0eee2011-05-26 01:26:41 +01002329 }
2330
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002331 if (bd == PROTECT)
2332 size += 1;
Steve Block44f0eee2011-05-26 01:26:41 +01002333
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002334 return size * kInstrSize;
2335}
Steve Block44f0eee2011-05-26 01:26:41 +01002336
Steve Block44f0eee2011-05-26 01:26:41 +01002337
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002338// Note: To call gcc-compiled C code on MIPS, you must call through t9.
2339void MacroAssembler::Call(Register target,
2340 Condition cond,
2341 Register rs,
2342 const Operand& rt,
2343 BranchDelaySlot bd) {
2344 BlockTrampolinePoolScope block_trampoline_pool(this);
2345 Label start;
2346 bind(&start);
2347 if (cond == cc_always) {
2348 jalr(target);
2349 } else {
2350 BRANCH_ARGS_CHECK(cond, rs, rt);
2351 Branch(2, NegateCondition(cond), rs, rt);
2352 jalr(target);
Steve Block44f0eee2011-05-26 01:26:41 +01002353 }
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002354 // Emit a nop in the branch delay slot if required.
2355 if (bd == PROTECT)
2356 nop();
2357
2358 ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
2359 SizeOfCodeGeneratedSince(&start));
2360}
2361
2362
2363int MacroAssembler::CallSize(Address target,
2364 RelocInfo::Mode rmode,
2365 Condition cond,
2366 Register rs,
2367 const Operand& rt,
2368 BranchDelaySlot bd) {
2369 int size = CallSize(t9, cond, rs, rt, bd);
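  // Add the two instructions (lui/ori) that li() emits to load the 32-bit call
  // target into t9.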
2370 return size + 2 * kInstrSize;
2371}
2372
2373
2374void MacroAssembler::Call(Address target,
2375 RelocInfo::Mode rmode,
2376 Condition cond,
2377 Register rs,
2378 const Operand& rt,
2379 BranchDelaySlot bd) {
2380 BlockTrampolinePoolScope block_trampoline_pool(this);
2381 Label start;
2382 bind(&start);
2383 int32_t target_int = reinterpret_cast<int32_t>(target);
2384 // Must record previous source positions before the
2385 // li() generates a new code target.
2386 positions_recorder()->WriteRecordedPositions();
2387 li(t9, Operand(target_int, rmode), true);
2388 Call(t9, cond, rs, rt, bd);
2389 ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
2390 SizeOfCodeGeneratedSince(&start));
2391}
2392
2393
2394int MacroAssembler::CallSize(Handle<Code> code,
2395 RelocInfo::Mode rmode,
2396 unsigned ast_id,
2397 Condition cond,
2398 Register rs,
2399 const Operand& rt,
2400 BranchDelaySlot bd) {
2401 return CallSize(reinterpret_cast<Address>(code.location()),
2402 rmode, cond, rs, rt, bd);
2403}
2404
2405
2406void MacroAssembler::Call(Handle<Code> code,
2407 RelocInfo::Mode rmode,
2408 unsigned ast_id,
2409 Condition cond,
2410 Register rs,
2411 const Operand& rt,
2412 BranchDelaySlot bd) {
2413 BlockTrampolinePoolScope block_trampoline_pool(this);
2414 Label start;
2415 bind(&start);
2416 ASSERT(RelocInfo::IsCodeTarget(rmode));
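  // When an AST id is supplied, record it and switch to the _WITH_ID reloc mode
  // so the call site can be associated with its AST node.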
2417 if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
2418 SetRecordedAstId(ast_id);
2419 rmode = RelocInfo::CODE_TARGET_WITH_ID;
2420 }
2421 Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
2422 ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt),
2423 SizeOfCodeGeneratedSince(&start));
2424}
2425
2426
2427void MacroAssembler::Ret(Condition cond,
2428 Register rs,
2429 const Operand& rt,
2430 BranchDelaySlot bd) {
2431 Jump(ra, cond, rs, rt, bd);
2432}
2433
2434
2435void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
2436 BlockTrampolinePoolScope block_trampoline_pool(this);
2437
2438 uint32_t imm28;
2439 imm28 = jump_address(L);
2440 imm28 &= kImm28Mask;
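  // The j instruction can only reach targets inside the current 256 MB region,
  // so only the low 28 bits of the address are encoded.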
2441 { BlockGrowBufferScope block_buf_growth(this);
2442 // Buffer growth (and relocation) must be blocked for internal references
2443 // until associated instructions are emitted and available to be patched.
2444 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2445 j(imm28);
2446 }
2447 // Emit a nop in the branch delay slot if required.
2448 if (bdslot == PROTECT)
2449 nop();
2450}
2451
2452
2453void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
2454 BlockTrampolinePoolScope block_trampoline_pool(this);
2455
2456 uint32_t imm32;
2457 imm32 = jump_address(L);
2458 { BlockGrowBufferScope block_buf_growth(this);
2459 // Buffer growth (and relocation) must be blocked for internal references
2460 // until associated instructions are emitted and available to be patched.
2461 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2462 lui(at, (imm32 & kHiMask) >> kLuiShift);
2463 ori(at, at, (imm32 & kImm16Mask));
2464 }
2465 jr(at);
2466
2467 // Emit a nop in the branch delay slot if required.
2468 if (bdslot == PROTECT)
2469 nop();
2470}
2471
2472
2473void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
2474 BlockTrampolinePoolScope block_trampoline_pool(this);
2475
2476 uint32_t imm32;
2477 imm32 = jump_address(L);
2478 { BlockGrowBufferScope block_buf_growth(this);
2479 // Buffer growth (and relocation) must be blocked for internal references
2480 // until associated instructions are emitted and available to be patched.
2481 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2482 lui(at, (imm32 & kHiMask) >> kLuiShift);
2483 ori(at, at, (imm32 & kImm16Mask));
2484 }
2485 jalr(at);
2486
2487 // Emit a nop in the branch delay slot if required.
2488 if (bdslot == PROTECT)
2489 nop();
Steve Block44f0eee2011-05-26 01:26:41 +01002490}
2491
2492
2493void MacroAssembler::DropAndRet(int drop,
2494 Condition cond,
2495 Register r1,
2496 const Operand& r2) {
2497 // This is a workaround to make sure only one branch instruction is
2498 // generated. It relies on Drop and Ret not creating branches if
2499 // cond == cc_always.
2500 Label skip;
2501 if (cond != cc_always) {
2502 Branch(&skip, NegateCondition(cond), r1, r2);
2503 }
2504
2505 Drop(drop);
2506 Ret();
2507
2508 if (cond != cc_always) {
2509 bind(&skip);
2510 }
2511}
2512
2513
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002514void MacroAssembler::Drop(int count,
2515 Condition cond,
2516 Register reg,
2517 const Operand& op) {
2518 if (count <= 0) {
2519 return;
2520 }
2521
2522 Label skip;
2523
2524 if (cond != al) {
2525 Branch(&skip, NegateCondition(cond), reg, op);
2526 }
2527
2528 addiu(sp, sp, count * kPointerSize);
2529
2530 if (cond != al) {
2531 bind(&skip);
2532 }
2533}
2534
2535
2536
Steve Block44f0eee2011-05-26 01:26:41 +01002537void MacroAssembler::Swap(Register reg1,
2538 Register reg2,
2539 Register scratch) {
2540 if (scratch.is(no_reg)) {
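    // No scratch register available: swap in place using the three-XOR trick.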
2541 Xor(reg1, reg1, Operand(reg2));
2542 Xor(reg2, reg2, Operand(reg1));
2543 Xor(reg1, reg1, Operand(reg2));
2544 } else {
2545 mov(scratch, reg1);
2546 mov(reg1, reg2);
2547 mov(reg2, scratch);
2548 }
Andrei Popescu31002712010-02-23 13:46:05 +00002549}
2550
2551
2552void MacroAssembler::Call(Label* target) {
Steve Block44f0eee2011-05-26 01:26:41 +01002553 BranchAndLink(target);
2554}
2555
2556
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002557void MacroAssembler::Push(Handle<Object> handle) {
2558 li(at, Operand(handle));
2559 push(at);
2560}
2561
2562
Steve Block6ded16b2010-05-10 14:33:55 +01002563#ifdef ENABLE_DEBUGGER_SUPPORT
Steve Block6ded16b2010-05-10 14:33:55 +01002564
Steve Block44f0eee2011-05-26 01:26:41 +01002565void MacroAssembler::DebugBreak() {
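  // Call Runtime::kDebugBreak with zero arguments through the CEntry stub.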
Steve Block44f0eee2011-05-26 01:26:41 +01002566 mov(a0, zero_reg);
2567 li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
2568 CEntryStub ces(1);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002569 ASSERT(AllowThisStubCall(&ces));
Steve Block44f0eee2011-05-26 01:26:41 +01002570 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
2571}
2572
2573#endif // ENABLE_DEBUGGER_SUPPORT
Steve Block6ded16b2010-05-10 14:33:55 +01002574
2575
Andrei Popescu31002712010-02-23 13:46:05 +00002576// ---------------------------------------------------------------------------
Ben Murdoch257744e2011-11-30 15:57:28 +00002577// Exception handling.
Andrei Popescu31002712010-02-23 13:46:05 +00002578
2579void MacroAssembler::PushTryHandler(CodeLocation try_location,
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002580 HandlerType type,
2581 int handler_index) {
Steve Block6ded16b2010-05-10 14:33:55 +01002582 // Adjust this code if not the case.
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002583 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2584 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002585 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2586 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2587 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2588 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
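  // The handler built below therefore occupies five words which, from the new
  // sp upwards, hold: next handler, code object, state, context and fp.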
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002589
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002590 // For the JSEntry handler, we must preserve a0-a3 and s0.
2591 // t1-t3 are available. We will build up the handler from the bottom by
2592 // pushing on the stack. First compute the state.
2593 unsigned state = StackHandler::OffsetField::encode(handler_index);
Steve Block6ded16b2010-05-10 14:33:55 +01002594 if (try_location == IN_JAVASCRIPT) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002595 state |= (type == TRY_CATCH_HANDLER)
2596 ? StackHandler::KindField::encode(StackHandler::TRY_CATCH)
2597 : StackHandler::KindField::encode(StackHandler::TRY_FINALLY);
Steve Block6ded16b2010-05-10 14:33:55 +01002598 } else {
Steve Block6ded16b2010-05-10 14:33:55 +01002599 ASSERT(try_location == IN_JS_ENTRY);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002600 state |= StackHandler::KindField::encode(StackHandler::ENTRY);
Steve Block6ded16b2010-05-10 14:33:55 +01002601 }
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002602
2603 // Set up the code object (t1) and the state (t2) for pushing.
2604 li(t1, Operand(CodeObject()));
2605 li(t2, Operand(state));
2606
2607 // Push the frame pointer, context, state, and code object.
2608 if (try_location == IN_JAVASCRIPT) {
2609 MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
2610 } else {
2611 ASSERT_EQ(Smi::FromInt(0), 0);
2612 // The second zero_reg indicates no context.
2613 // The first zero_reg is the NULL frame pointer.
2614 // The operands are reversed to match the order of MultiPush/Pop.
2615 Push(zero_reg, zero_reg, t2, t1);
2616 }
2617
2618 // Link the current handler as the next handler.
2619 li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2620 lw(t1, MemOperand(t2));
2621 push(t1);
2622 // Set this new handler as the current one.
2623 sw(sp, MemOperand(t2));
Andrei Popescu31002712010-02-23 13:46:05 +00002624}
2625
2626
2627void MacroAssembler::PopTryHandler() {
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002628 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
Steve Block44f0eee2011-05-26 01:26:41 +01002629 pop(a1);
2630 Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
Ben Murdoch589d6972011-11-30 16:04:58 +00002631 li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
Steve Block44f0eee2011-05-26 01:26:41 +01002632 sw(a1, MemOperand(at));
Andrei Popescu31002712010-02-23 13:46:05 +00002633}
2634
2635
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002636void MacroAssembler::JumpToHandlerEntry() {
2637 // Compute the handler entry address and jump to it. The handler table is
2638 // a fixed array of (smi-tagged) code offsets.
2639 // v0 = exception, a1 = code object, a2 = state.
2640 lw(a3, FieldMemOperand(a1, Code::kHandlerTableOffset)); // Handler table.
2641 Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2642 srl(a2, a2, StackHandler::kKindWidth); // Handler index.
2643 sll(a2, a2, kPointerSizeLog2);
2644 Addu(a2, a3, a2);
2645 lw(a2, MemOperand(a2)); // Smi-tagged offset.
2646 Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
2647 sra(t9, a2, kSmiTagSize);
2648 Addu(t9, t9, a1);
2649 Jump(t9); // Jump.
2650}
Ben Murdoch257744e2011-11-30 15:57:28 +00002651
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002652
2653void MacroAssembler::Throw(Register value) {
Ben Murdoch257744e2011-11-30 15:57:28 +00002654 // Adjust this code if not the case.
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002655 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002656 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2657 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2658 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2659 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2660 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
Ben Murdoch257744e2011-11-30 15:57:28 +00002661
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002662 // The exception is expected in v0.
2663 Move(v0, value);
2664
2665 // Drop the stack pointer to the top of the top handler.
Ben Murdoch589d6972011-11-30 16:04:58 +00002666 li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002667 isolate())));
Ben Murdoch257744e2011-11-30 15:57:28 +00002668 lw(sp, MemOperand(a3));
2669
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002670 // Restore the next handler.
Ben Murdoch257744e2011-11-30 15:57:28 +00002671 pop(a2);
2672 sw(a2, MemOperand(a3));
Ben Murdoch257744e2011-11-30 15:57:28 +00002673
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002674 // Get the code object (a1) and state (a2). Restore the context and frame
2675 // pointer.
2676 MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002677
2678 // If the handler is a JS frame, restore the context to the frame.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002679 // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
2680 // or cp.
Ben Murdoch257744e2011-11-30 15:57:28 +00002681 Label done;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002682 Branch(&done, eq, cp, Operand(zero_reg));
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002683 sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
Ben Murdoch257744e2011-11-30 15:57:28 +00002684 bind(&done);
2685
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002686 JumpToHandlerEntry();
Ben Murdoch257744e2011-11-30 15:57:28 +00002687}
2688
2689
2690void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
2691 Register value) {
2692 // Adjust this code if not the case.
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002693 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2694 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002695 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2696 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2697 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2698 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
Ben Murdoch257744e2011-11-30 15:57:28 +00002699
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002700 // The exception is expected in v0.
Ben Murdoch257744e2011-11-30 15:57:28 +00002701 if (type == OUT_OF_MEMORY) {
2702 // Set external caught exception to false.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002703 ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
2704 isolate());
Ben Murdoch257744e2011-11-30 15:57:28 +00002705 li(a0, Operand(false, RelocInfo::NONE));
2706 li(a2, Operand(external_caught));
2707 sw(a0, MemOperand(a2));
2708
2709 // Set pending exception and v0 to out of memory exception.
2710 Failure* out_of_memory = Failure::OutOfMemoryException();
2711 li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
Ben Murdoch589d6972011-11-30 16:04:58 +00002712 li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002713 isolate())));
Ben Murdoch257744e2011-11-30 15:57:28 +00002714 sw(v0, MemOperand(a2));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002715 } else if (!value.is(v0)) {
2716 mov(v0, value);
Ben Murdoch257744e2011-11-30 15:57:28 +00002717 }
2718
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002719 // Drop the stack pointer to the top of the top stack handler.
2720 li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2721 lw(sp, MemOperand(a3));
Ben Murdoch257744e2011-11-30 15:57:28 +00002722
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002723 // Unwind the handlers until the ENTRY handler is found.
2724 Label fetch_next, check_kind;
2725 jmp(&check_kind);
2726 bind(&fetch_next);
2727 lw(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
Ben Murdoch257744e2011-11-30 15:57:28 +00002728
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002729 bind(&check_kind);
2730 STATIC_ASSERT(StackHandler::ENTRY == 0);
2731 lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
2732 And(a2, a2, Operand(StackHandler::KindField::kMask));
2733 Branch(&fetch_next, ne, a2, Operand(zero_reg));
Ben Murdoch257744e2011-11-30 15:57:28 +00002734
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002735 // Set the top handler address to next handler past the top ENTRY handler.
2736 pop(a2);
2737 sw(a2, MemOperand(a3));
Ben Murdoch257744e2011-11-30 15:57:28 +00002738
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002739 // Get the code object (a1) and state (a2). Clear the context and frame
2740 // pointer (0 was saved in the handler).
2741 MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
Ben Murdoch257744e2011-11-30 15:57:28 +00002742
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002743 JumpToHandlerEntry();
Ben Murdoch257744e2011-11-30 15:57:28 +00002744}
2745
2746
Steve Block44f0eee2011-05-26 01:26:41 +01002747void MacroAssembler::AllocateInNewSpace(int object_size,
2748 Register result,
2749 Register scratch1,
2750 Register scratch2,
2751 Label* gc_required,
2752 AllocationFlags flags) {
2753 if (!FLAG_inline_new) {
Ben Murdoch257744e2011-11-30 15:57:28 +00002754 if (emit_debug_code()) {
Steve Block44f0eee2011-05-26 01:26:41 +01002755 // Trash the registers to simulate an allocation failure.
2756 li(result, 0x7091);
2757 li(scratch1, 0x7191);
2758 li(scratch2, 0x7291);
2759 }
2760 jmp(gc_required);
2761 return;
Steve Block6ded16b2010-05-10 14:33:55 +01002762 }
2763
Steve Block44f0eee2011-05-26 01:26:41 +01002764 ASSERT(!result.is(scratch1));
2765 ASSERT(!result.is(scratch2));
2766 ASSERT(!scratch1.is(scratch2));
2767 ASSERT(!scratch1.is(t9));
2768 ASSERT(!scratch2.is(t9));
2769 ASSERT(!result.is(t9));
Steve Block6ded16b2010-05-10 14:33:55 +01002770
Steve Block44f0eee2011-05-26 01:26:41 +01002771 // Make object size into bytes.
2772 if ((flags & SIZE_IN_WORDS) != 0) {
2773 object_size *= kPointerSize;
2774 }
2775 ASSERT_EQ(0, object_size & kObjectAlignmentMask);
Steve Block6ded16b2010-05-10 14:33:55 +01002776
Steve Block44f0eee2011-05-26 01:26:41 +01002777 // Check relative positions of allocation top and limit addresses.
2778 // ARM adds additional checks to make sure the ldm instruction can be
2779 // used. On MIPS we don't have ldm so we don't need additional checks either.
2780 ExternalReference new_space_allocation_top =
2781 ExternalReference::new_space_allocation_top_address(isolate());
2782 ExternalReference new_space_allocation_limit =
2783 ExternalReference::new_space_allocation_limit_address(isolate());
2784 intptr_t top =
2785 reinterpret_cast<intptr_t>(new_space_allocation_top.address());
2786 intptr_t limit =
2787 reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
2788 ASSERT((limit - top) == kPointerSize);
2789
2790 // Set up allocation top address and object size registers.
2791 Register topaddr = scratch1;
2792 Register obj_size_reg = scratch2;
2793 li(topaddr, Operand(new_space_allocation_top));
2794 li(obj_size_reg, Operand(object_size));
2795
2796 // This code stores a temporary value in t9.
2797 if ((flags & RESULT_CONTAINS_TOP) == 0) {
2798 // Load allocation top into result and allocation limit into t9.
2799 lw(result, MemOperand(topaddr));
2800 lw(t9, MemOperand(topaddr, kPointerSize));
2801 } else {
Ben Murdoch257744e2011-11-30 15:57:28 +00002802 if (emit_debug_code()) {
Steve Block44f0eee2011-05-26 01:26:41 +01002803 // Assert that result actually contains top on entry. t9 is used
2804 // immediately below so this use of t9 does not cause difference with
2805 // respect to register content between debug and release mode.
2806 lw(t9, MemOperand(topaddr));
2807 Check(eq, "Unexpected allocation top", result, Operand(t9));
2808 }
2809 // Load allocation limit into t9. Result already contains allocation top.
2810 lw(t9, MemOperand(topaddr, limit - top));
2811 }
2812
2813 // Calculate new top and bail out if new space is exhausted. Use result
2814 // to calculate the new top.
2815 Addu(scratch2, result, Operand(obj_size_reg));
2816 Branch(gc_required, Ugreater, scratch2, Operand(t9));
2817 sw(scratch2, MemOperand(topaddr));
2818
2819 // Tag object if requested.
2820 if ((flags & TAG_OBJECT) != 0) {
2821 Addu(result, result, Operand(kHeapObjectTag));
2822 }
Steve Block6ded16b2010-05-10 14:33:55 +01002823}
2824
2825
Steve Block44f0eee2011-05-26 01:26:41 +01002826void MacroAssembler::AllocateInNewSpace(Register object_size,
2827 Register result,
2828 Register scratch1,
2829 Register scratch2,
2830 Label* gc_required,
2831 AllocationFlags flags) {
2832 if (!FLAG_inline_new) {
Ben Murdoch257744e2011-11-30 15:57:28 +00002833 if (emit_debug_code()) {
Steve Block44f0eee2011-05-26 01:26:41 +01002834 // Trash the registers to simulate an allocation failure.
2835 li(result, 0x7091);
2836 li(scratch1, 0x7191);
2837 li(scratch2, 0x7291);
2838 }
2839 jmp(gc_required);
2840 return;
2841 }
2842
2843 ASSERT(!result.is(scratch1));
2844 ASSERT(!result.is(scratch2));
2845 ASSERT(!scratch1.is(scratch2));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002846 ASSERT(!object_size.is(t9));
Steve Block44f0eee2011-05-26 01:26:41 +01002847 ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
2848
2849 // Check relative positions of allocation top and limit addresses.
2850 // ARM adds additional checks to make sure the ldm instruction can be
2851 // used. On MIPS we don't have ldm so we don't need additional checks either.
2852 ExternalReference new_space_allocation_top =
2853 ExternalReference::new_space_allocation_top_address(isolate());
2854 ExternalReference new_space_allocation_limit =
2855 ExternalReference::new_space_allocation_limit_address(isolate());
2856 intptr_t top =
2857 reinterpret_cast<intptr_t>(new_space_allocation_top.address());
2858 intptr_t limit =
2859 reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
2860 ASSERT((limit - top) == kPointerSize);
2861
2862 // Set up allocation top address and object size registers.
2863 Register topaddr = scratch1;
2864 li(topaddr, Operand(new_space_allocation_top));
2865
2866 // This code stores a temporary value in t9.
2867 if ((flags & RESULT_CONTAINS_TOP) == 0) {
2868 // Load allocation top into result and allocation limit into t9.
2869 lw(result, MemOperand(topaddr));
2870 lw(t9, MemOperand(topaddr, kPointerSize));
2871 } else {
Ben Murdoch257744e2011-11-30 15:57:28 +00002872 if (emit_debug_code()) {
Steve Block44f0eee2011-05-26 01:26:41 +01002873 // Assert that result actually contains top on entry. t9 is used
2874 // immediately below so this use of t9 does not cause difference with
2875 // respect to register content between debug and release mode.
2876 lw(t9, MemOperand(topaddr));
2877 Check(eq, "Unexpected allocation top", result, Operand(t9));
2878 }
2879 // Load allocation limit into t9. Result already contains allocation top.
2880 lw(t9, MemOperand(topaddr, limit - top));
2881 }
2882
2883 // Calculate new top and bail out if new space is exhausted. Use result
2884 // to calculate the new top. Object size may be in words so a shift is
2885 // required to get the number of bytes.
2886 if ((flags & SIZE_IN_WORDS) != 0) {
2887 sll(scratch2, object_size, kPointerSizeLog2);
2888 Addu(scratch2, result, scratch2);
2889 } else {
2890 Addu(scratch2, result, Operand(object_size));
2891 }
2892 Branch(gc_required, Ugreater, scratch2, Operand(t9));
2893
2894 // Update allocation top. result temporarily holds the new top.
Ben Murdoch257744e2011-11-30 15:57:28 +00002895 if (emit_debug_code()) {
Steve Block44f0eee2011-05-26 01:26:41 +01002896 And(t9, scratch2, Operand(kObjectAlignmentMask));
2897 Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
2898 }
2899 sw(scratch2, MemOperand(topaddr));
2900
2901 // Tag object if requested.
2902 if ((flags & TAG_OBJECT) != 0) {
2903 Addu(result, result, Operand(kHeapObjectTag));
2904 }
2905}
2906
2907
2908void MacroAssembler::UndoAllocationInNewSpace(Register object,
2909 Register scratch) {
2910 ExternalReference new_space_allocation_top =
2911 ExternalReference::new_space_allocation_top_address(isolate());
2912
2913 // Make sure the object has no tag before resetting top.
2914 And(object, object, Operand(~kHeapObjectTagMask));
2915#ifdef DEBUG
2916 // Check that the object un-allocated is below the current top.
2917 li(scratch, Operand(new_space_allocation_top));
2918 lw(scratch, MemOperand(scratch));
2919 Check(less, "Undo allocation of non allocated memory",
2920 object, Operand(scratch));
2921#endif
2922 // Write the address of the object to un-allocate as the current top.
2923 li(scratch, Operand(new_space_allocation_top));
2924 sw(object, MemOperand(scratch));
2925}
2926
2927
2928void MacroAssembler::AllocateTwoByteString(Register result,
2929 Register length,
2930 Register scratch1,
2931 Register scratch2,
2932 Register scratch3,
2933 Label* gc_required) {
2934 // Calculate the number of bytes needed for the characters in the string while
2935 // observing object alignment.
2936 ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
2937 sll(scratch1, length, 1); // Length in bytes, not chars.
2938 addiu(scratch1, scratch1,
2939 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
2940 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
2941
2942 // Allocate two-byte string in new space.
2943 AllocateInNewSpace(scratch1,
2944 result,
2945 scratch2,
2946 scratch3,
2947 gc_required,
2948 TAG_OBJECT);
2949
2950 // Set the map, length and hash field.
2951 InitializeNewString(result,
2952 length,
2953 Heap::kStringMapRootIndex,
2954 scratch1,
2955 scratch2);
2956}
2957
2958
2959void MacroAssembler::AllocateAsciiString(Register result,
2960 Register length,
2961 Register scratch1,
2962 Register scratch2,
2963 Register scratch3,
2964 Label* gc_required) {
2965 // Calculate the number of bytes needed for the characters in the string
2966 // while observing object alignment.
2967 ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
2968 ASSERT(kCharSize == 1);
2969 addiu(scratch1, length, kObjectAlignmentMask + SeqAsciiString::kHeaderSize);
2970 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
2971
2972 // Allocate ASCII string in new space.
2973 AllocateInNewSpace(scratch1,
2974 result,
2975 scratch2,
2976 scratch3,
2977 gc_required,
2978 TAG_OBJECT);
2979
2980 // Set the map, length and hash field.
2981 InitializeNewString(result,
2982 length,
2983 Heap::kAsciiStringMapRootIndex,
2984 scratch1,
2985 scratch2);
2986}
2987
2988
2989void MacroAssembler::AllocateTwoByteConsString(Register result,
2990 Register length,
2991 Register scratch1,
2992 Register scratch2,
2993 Label* gc_required) {
2994 AllocateInNewSpace(ConsString::kSize,
2995 result,
2996 scratch1,
2997 scratch2,
2998 gc_required,
2999 TAG_OBJECT);
3000 InitializeNewString(result,
3001 length,
3002 Heap::kConsStringMapRootIndex,
3003 scratch1,
3004 scratch2);
3005}
3006
3007
3008void MacroAssembler::AllocateAsciiConsString(Register result,
3009 Register length,
3010 Register scratch1,
3011 Register scratch2,
3012 Label* gc_required) {
3013 AllocateInNewSpace(ConsString::kSize,
3014 result,
3015 scratch1,
3016 scratch2,
3017 gc_required,
3018 TAG_OBJECT);
3019 InitializeNewString(result,
3020 length,
3021 Heap::kConsAsciiStringMapRootIndex,
3022 scratch1,
3023 scratch2);
3024}
3025
3026
Ben Murdoch589d6972011-11-30 16:04:58 +00003027void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3028 Register length,
3029 Register scratch1,
3030 Register scratch2,
3031 Label* gc_required) {
3032 AllocateInNewSpace(SlicedString::kSize,
3033 result,
3034 scratch1,
3035 scratch2,
3036 gc_required,
3037 TAG_OBJECT);
3038
3039 InitializeNewString(result,
3040 length,
3041 Heap::kSlicedStringMapRootIndex,
3042 scratch1,
3043 scratch2);
3044}
3045
3046
3047void MacroAssembler::AllocateAsciiSlicedString(Register result,
3048 Register length,
3049 Register scratch1,
3050 Register scratch2,
3051 Label* gc_required) {
3052 AllocateInNewSpace(SlicedString::kSize,
3053 result,
3054 scratch1,
3055 scratch2,
3056 gc_required,
3057 TAG_OBJECT);
3058
3059 InitializeNewString(result,
3060 length,
3061 Heap::kSlicedAsciiStringMapRootIndex,
3062 scratch1,
3063 scratch2);
3064}
3065
3066
Steve Block44f0eee2011-05-26 01:26:41 +01003067// Allocates a heap number or jumps to the label if the young space is full and
3068// a scavenge is needed.
3069void MacroAssembler::AllocateHeapNumber(Register result,
3070 Register scratch1,
3071 Register scratch2,
3072 Register heap_number_map,
3073 Label* need_gc) {
3074 // Allocate an object in the heap for the heap number and tag it as a heap
3075 // object.
3076 AllocateInNewSpace(HeapNumber::kSize,
3077 result,
3078 scratch1,
3079 scratch2,
3080 need_gc,
3081 TAG_OBJECT);
3082
3083 // Store heap number map in the allocated object.
3084 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3085 sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3086}
3087
3088
3089void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3090 FPURegister value,
3091 Register scratch1,
3092 Register scratch2,
3093 Label* gc_required) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003094 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
3095 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
Steve Block44f0eee2011-05-26 01:26:41 +01003096 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3097}
3098
3099
3100// Copies a fixed number of fields of heap objects from src to dst.
3101void MacroAssembler::CopyFields(Register dst,
3102 Register src,
3103 RegList temps,
3104 int field_count) {
3105 ASSERT((temps & dst.bit()) == 0);
3106 ASSERT((temps & src.bit()) == 0);
3107 // Primitive implementation using only one temporary register.
3108
3109 Register tmp = no_reg;
3110 // Find a temp register in temps list.
3111 for (int i = 0; i < kNumRegisters; i++) {
3112 if ((temps & (1 << i)) != 0) {
3113 tmp.code_ = i;
3114 break;
3115 }
3116 }
3117 ASSERT(!tmp.is(no_reg));
3118
3119 for (int i = 0; i < field_count; i++) {
3120 lw(tmp, FieldMemOperand(src, i * kPointerSize));
3121 sw(tmp, FieldMemOperand(dst, i * kPointerSize));
3122 }
3123}
3124
3125
Ben Murdoch257744e2011-11-30 15:57:28 +00003126void MacroAssembler::CopyBytes(Register src,
3127 Register dst,
3128 Register length,
3129 Register scratch) {
3130 Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3131
3132 // Align src before copying in word size chunks.
3133 bind(&align_loop);
3134 Branch(&done, eq, length, Operand(zero_reg));
3135 bind(&align_loop_1);
3136 And(scratch, src, kPointerSize - 1);
3137 Branch(&word_loop, eq, scratch, Operand(zero_reg));
3138 lbu(scratch, MemOperand(src));
3139 Addu(src, src, 1);
3140 sb(scratch, MemOperand(dst));
3141 Addu(dst, dst, 1);
3142 Subu(length, length, Operand(1));
3143 Branch(&byte_loop_1, ne, length, Operand(zero_reg));
3144
3145 // Copy bytes in word size chunks.
3146 bind(&word_loop);
3147 if (emit_debug_code()) {
3148 And(scratch, src, kPointerSize - 1);
3149 Assert(eq, "Expecting alignment for CopyBytes",
3150 scratch, Operand(zero_reg));
3151 }
3152 Branch(&byte_loop, lt, length, Operand(kPointerSize));
3153 lw(scratch, MemOperand(src));
3154 Addu(src, src, kPointerSize);
3155
3156 // TODO(kalmard) check if this can be optimized to use sw in most cases.
3157 // Can't use unaligned access - copy byte by byte.
3158 sb(scratch, MemOperand(dst, 0));
3159 srl(scratch, scratch, 8);
3160 sb(scratch, MemOperand(dst, 1));
3161 srl(scratch, scratch, 8);
3162 sb(scratch, MemOperand(dst, 2));
3163 srl(scratch, scratch, 8);
3164 sb(scratch, MemOperand(dst, 3));
3165 Addu(dst, dst, 4);
3166
3167 Subu(length, length, Operand(kPointerSize));
3168 Branch(&word_loop);
3169
3170 // Copy the last bytes if any left.
3171 bind(&byte_loop);
3172 Branch(&done, eq, length, Operand(zero_reg));
3173 bind(&byte_loop_1);
3174 lbu(scratch, MemOperand(src));
3175 Addu(src, src, 1);
3176 sb(scratch, MemOperand(dst));
3177 Addu(dst, dst, 1);
3178 Subu(length, length, Operand(1));
3179 Branch(&byte_loop_1, ne, length, Operand(zero_reg));
3180 bind(&done);
3181}
3182
3183
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003184void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3185 Register end_offset,
3186 Register filler) {
3187 Label loop, entry;
3188 Branch(&entry);
3189 bind(&loop);
3190 sw(filler, MemOperand(start_offset));
3191 Addu(start_offset, start_offset, kPointerSize);
3192 bind(&entry);
3193 Branch(&loop, lt, start_offset, Operand(end_offset));
3194}
3195
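// Illustrative usage sketch (not part of the original source; the register
// choices are assumptions): after allocating raw space for an object, the
// field range between two untagged addresses is typically pre-filled with the
// undefined value so the GC never observes uninitialized slots.
//
//   LoadRoot(t7, Heap::kUndefinedValueRootIndex);  // filler value
//   InitializeFieldsWithFiller(t4,   // address of the first field to fill
//                              t5,   // end address (exclusive)
//                              t7);  // filler register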
3196
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003197void MacroAssembler::CheckFastElements(Register map,
3198 Register scratch,
3199 Label* fail) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003200 STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
3201 STATIC_ASSERT(FAST_ELEMENTS == 1);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003202 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3203 Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue));
3204}
3205
3206
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003207void MacroAssembler::CheckFastObjectElements(Register map,
3208 Register scratch,
3209 Label* fail) {
3210 STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
3211 STATIC_ASSERT(FAST_ELEMENTS == 1);
3212 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3213 Branch(fail, ls, scratch,
3214 Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
3215 Branch(fail, hi, scratch,
3216 Operand(Map::kMaximumBitField2FastElementValue));
3217}
3218
3219
3220void MacroAssembler::CheckFastSmiOnlyElements(Register map,
3221 Register scratch,
3222 Label* fail) {
3223 STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
3224 lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3225 Branch(fail, hi, scratch,
3226 Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
3227}
3228
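// The three checks above rely on the ElementsKind being stored in the upper
// bits of Map::bit_field2(), so a single unsigned compare of the whole byte
// against a "maximum" constant classifies the map. A rough C sketch of the
// same tests (illustrative only, using the constants named above):
//
//   uint8_t bf2 = map->bit_field2();
//   bool fast_smi_only = bf2 <= Map::kMaximumBitField2FastSmiOnlyElementValue;
//   bool fast_object   = !fast_smi_only &&
//                        bf2 <= Map::kMaximumBitField2FastElementValue;
//   bool fast_any      = bf2 <= Map::kMaximumBitField2FastElementValue;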
3229
3230void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3231 Register key_reg,
3232 Register receiver_reg,
3233 Register elements_reg,
3234 Register scratch1,
3235 Register scratch2,
3236 Register scratch3,
3237 Register scratch4,
3238 Label* fail) {
3239 Label smi_value, maybe_nan, have_double_value, is_nan, done;
3240 Register mantissa_reg = scratch2;
3241 Register exponent_reg = scratch3;
3242
3243 // Handle smi values specially.
3244 JumpIfSmi(value_reg, &smi_value);
3245
3246 // Ensure that the object is a heap number
3247 CheckMap(value_reg,
3248 scratch1,
3249 isolate()->factory()->heap_number_map(),
3250 fail,
3251 DONT_DO_SMI_CHECK);
3252
 3253  // Check for NaN: all NaN values have an exponent word greater (signed) than
 3254  // 0x7ff00000.
3255 li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
3256 lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
3257 Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
3258
3259 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3260
3261 bind(&have_double_value);
3262 sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3263 Addu(scratch1, scratch1, elements_reg);
3264 sw(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
3265 uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
3266 sw(exponent_reg, FieldMemOperand(scratch1, offset));
3267 jmp(&done);
3268
3269 bind(&maybe_nan);
 3270  // Could be NaN or Infinity. If the fraction is not zero, it's NaN; otherwise
3271 // it's an Infinity, and the non-NaN code path applies.
3272 Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
3273 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3274 Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
3275 bind(&is_nan);
3276 // Load canonical NaN for storing into the double array.
3277 uint64_t nan_int64 = BitCast<uint64_t>(
3278 FixedDoubleArray::canonical_not_the_hole_nan_as_double());
3279 li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
3280 li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
3281 jmp(&have_double_value);
3282
3283 bind(&smi_value);
3284 Addu(scratch1, elements_reg,
3285 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
3286 sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3287 Addu(scratch1, scratch1, scratch2);
 3288  // scratch1 is now the effective address of the double element.
3289
3290 FloatingPointHelper::Destination destination;
3291 if (CpuFeatures::IsSupported(FPU)) {
3292 destination = FloatingPointHelper::kFPURegisters;
3293 } else {
3294 destination = FloatingPointHelper::kCoreRegisters;
3295 }
3296
3297 Register untagged_value = receiver_reg;
3298 SmiUntag(untagged_value, value_reg);
3299 FloatingPointHelper::ConvertIntToDouble(this,
3300 untagged_value,
3301 destination,
3302 f0,
3303 mantissa_reg,
3304 exponent_reg,
3305 scratch4,
3306 f2);
3307 if (destination == FloatingPointHelper::kFPURegisters) {
3308 CpuFeatures::Scope scope(FPU);
3309 sdc1(f0, MemOperand(scratch1, 0));
3310 } else {
3311 sw(mantissa_reg, MemOperand(scratch1, 0));
3312 sw(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
3313 }
3314 bind(&done);
3315}
3316
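// For reference: the function above stores a boxed double as two 32-bit words
// (mantissa word at FixedDoubleArray::kHeaderSize, exponent/sign word four
// bytes above it) and canonicalizes NaNs so the hole-NaN bit pattern stays
// reserved for marking holes. Rough sketch of the NaN handling (illustrative
// only, conceptually what the generated code does):
//
//   double d = heap_number->value();
//   if (isnan(d)) {
//     d = FixedDoubleArray::canonical_not_the_hole_nan_as_double();
//   }
//   elements->set(key, d);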
3317
Ben Murdochc7cc0282012-03-05 14:35:55 +00003318void MacroAssembler::CompareMapAndBranch(Register obj,
3319 Register scratch,
3320 Handle<Map> map,
3321 Label* early_success,
3322 Condition cond,
3323 Label* branch_to,
3324 CompareMapMode mode) {
3325 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3326 Operand right = Operand(map);
3327 if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
3328 Map* transitioned_fast_element_map(
3329 map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
3330 ASSERT(transitioned_fast_element_map == NULL ||
3331 map->elements_kind() != FAST_ELEMENTS);
3332 if (transitioned_fast_element_map != NULL) {
3333 Branch(early_success, eq, scratch, right);
3334 right = Operand(Handle<Map>(transitioned_fast_element_map));
3335 }
3336
3337 Map* transitioned_double_map(
3338 map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
3339 ASSERT(transitioned_double_map == NULL ||
3340 map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
3341 if (transitioned_double_map != NULL) {
3342 Branch(early_success, eq, scratch, right);
3343 right = Operand(Handle<Map>(transitioned_double_map));
3344 }
3345 }
3346
3347 Branch(branch_to, cond, scratch, right);
3348}
3349
3350
Steve Block44f0eee2011-05-26 01:26:41 +01003351void MacroAssembler::CheckMap(Register obj,
3352 Register scratch,
3353 Handle<Map> map,
3354 Label* fail,
Ben Murdochc7cc0282012-03-05 14:35:55 +00003355 SmiCheckType smi_check_type,
3356 CompareMapMode mode) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003357 if (smi_check_type == DO_SMI_CHECK) {
Steve Block44f0eee2011-05-26 01:26:41 +01003358 JumpIfSmi(obj, fail);
3359 }
Ben Murdochc7cc0282012-03-05 14:35:55 +00003360 Label success;
3361 CompareMapAndBranch(obj, scratch, map, &success, ne, fail, mode);
3362 bind(&success);
Steve Block44f0eee2011-05-26 01:26:41 +01003363}
3364
3365
Ben Murdoch257744e2011-11-30 15:57:28 +00003366void MacroAssembler::DispatchMap(Register obj,
3367 Register scratch,
3368 Handle<Map> map,
3369 Handle<Code> success,
3370 SmiCheckType smi_check_type) {
3371 Label fail;
3372 if (smi_check_type == DO_SMI_CHECK) {
3373 JumpIfSmi(obj, &fail);
3374 }
3375 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3376 Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
3377 bind(&fail);
3378}
3379
3380
Steve Block44f0eee2011-05-26 01:26:41 +01003381void MacroAssembler::CheckMap(Register obj,
3382 Register scratch,
3383 Heap::RootListIndex index,
3384 Label* fail,
Ben Murdoch257744e2011-11-30 15:57:28 +00003385 SmiCheckType smi_check_type) {
3386 if (smi_check_type == DO_SMI_CHECK) {
Steve Block44f0eee2011-05-26 01:26:41 +01003387 JumpIfSmi(obj, fail);
3388 }
3389 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3390 LoadRoot(at, index);
3391 Branch(fail, ne, scratch, Operand(at));
Steve Block6ded16b2010-05-10 14:33:55 +01003392}
3393
3394
Ben Murdoch257744e2011-11-30 15:57:28 +00003395void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
3396 CpuFeatures::Scope scope(FPU);
3397 if (IsMipsSoftFloatABI) {
3398 Move(dst, v0, v1);
3399 } else {
3400 Move(dst, f0); // Reg f0 is o32 ABI FP return value.
3401 }
3402}
3403
3404
3405void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
3406 CpuFeatures::Scope scope(FPU);
3407 if (!IsMipsSoftFloatABI) {
3408 Move(f12, dreg);
3409 } else {
3410 Move(a0, a1, dreg);
3411 }
3412}
3413
3414
3415void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
3416 DoubleRegister dreg2) {
3417 CpuFeatures::Scope scope(FPU);
3418 if (!IsMipsSoftFloatABI) {
3419 if (dreg2.is(f12)) {
3420 ASSERT(!dreg1.is(f14));
3421 Move(f14, dreg2);
3422 Move(f12, dreg1);
3423 } else {
3424 Move(f12, dreg1);
3425 Move(f14, dreg2);
3426 }
3427 } else {
3428 Move(a0, a1, dreg1);
3429 Move(a2, a3, dreg2);
3430 }
3431}
3432
3433
3434void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
3435 Register reg) {
3436 CpuFeatures::Scope scope(FPU);
3437 if (!IsMipsSoftFloatABI) {
3438 Move(f12, dreg);
3439 Move(a2, reg);
3440 } else {
3441 Move(a2, reg);
3442 Move(a0, a1, dreg);
3443 }
3444}
3445
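// Illustrative usage sketch for the helpers above (not part of the original
// source; the external reference, argument counts, and registers are
// assumptions): calling a C helper that takes one double and returns a
// double, with the O32 soft-/hard-float difference hidden by the helpers.
//
//   PrepareCallCFunction(0, 1, t0);       // 0 integer args, 1 double arg
//   SetCallCDoubleArguments(f4);          // argument value lives in f4
//   CallCFunction(
//       ExternalReference::math_log_double_function(isolate()), 0, 1);
//   GetCFunctionDoubleResult(f4);         // move the result back into f4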
3446
3447void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
3448 // This macro takes the dst register to make the code more readable
3449 // at the call sites. However, the dst register has to be t1 to
3450 // follow the calling convention which requires the call type to be
3451 // in t1.
3452 ASSERT(dst.is(t1));
3453 if (call_kind == CALL_AS_FUNCTION) {
3454 li(dst, Operand(Smi::FromInt(1)));
3455 } else {
3456 li(dst, Operand(Smi::FromInt(0)));
3457 }
3458}
3459
3460
Steve Block6ded16b2010-05-10 14:33:55 +01003461// -----------------------------------------------------------------------------
Ben Murdoch257744e2011-11-30 15:57:28 +00003462// JavaScript invokes.
Steve Block6ded16b2010-05-10 14:33:55 +01003463
3464void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3465 const ParameterCount& actual,
3466 Handle<Code> code_constant,
3467 Register code_reg,
3468 Label* done,
Ben Murdochc7cc0282012-03-05 14:35:55 +00003469 bool* definitely_mismatches,
Steve Block44f0eee2011-05-26 01:26:41 +01003470 InvokeFlag flag,
Ben Murdoch257744e2011-11-30 15:57:28 +00003471 const CallWrapper& call_wrapper,
3472 CallKind call_kind) {
Steve Block6ded16b2010-05-10 14:33:55 +01003473 bool definitely_matches = false;
Ben Murdochc7cc0282012-03-05 14:35:55 +00003474 *definitely_mismatches = false;
Steve Block6ded16b2010-05-10 14:33:55 +01003475 Label regular_invoke;
3476
 3477  // Check whether the expected and actual argument counts match. If not,
 3478  // set up registers according to the contract with ArgumentsAdaptorTrampoline:
3479 // a0: actual arguments count
3480 // a1: function (passed through to callee)
3481 // a2: expected arguments count
3482 // a3: callee code entry
3483
3484 // The code below is made a lot easier because the calling code already sets
3485 // up actual and expected registers according to the contract if values are
3486 // passed in registers.
3487 ASSERT(actual.is_immediate() || actual.reg().is(a0));
3488 ASSERT(expected.is_immediate() || expected.reg().is(a2));
3489 ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
3490
3491 if (expected.is_immediate()) {
3492 ASSERT(actual.is_immediate());
3493 if (expected.immediate() == actual.immediate()) {
3494 definitely_matches = true;
3495 } else {
3496 li(a0, Operand(actual.immediate()));
3497 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3498 if (expected.immediate() == sentinel) {
3499 // Don't worry about adapting arguments for builtins that
 3500  // don't want that done. Skip adaptation code by making it look
3501 // like we have a match between expected and actual number of
3502 // arguments.
3503 definitely_matches = true;
3504 } else {
Ben Murdochc7cc0282012-03-05 14:35:55 +00003505 *definitely_mismatches = true;
Steve Block6ded16b2010-05-10 14:33:55 +01003506 li(a2, Operand(expected.immediate()));
3507 }
3508 }
Ben Murdoch257744e2011-11-30 15:57:28 +00003509 } else if (actual.is_immediate()) {
3510 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
3511 li(a0, Operand(actual.immediate()));
Steve Block6ded16b2010-05-10 14:33:55 +01003512 } else {
Ben Murdoch257744e2011-11-30 15:57:28 +00003513 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
Steve Block6ded16b2010-05-10 14:33:55 +01003514 }
3515
3516 if (!definitely_matches) {
3517 if (!code_constant.is_null()) {
3518 li(a3, Operand(code_constant));
3519 addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
3520 }
3521
Steve Block44f0eee2011-05-26 01:26:41 +01003522 Handle<Code> adaptor =
3523 isolate()->builtins()->ArgumentsAdaptorTrampoline();
Steve Block6ded16b2010-05-10 14:33:55 +01003524 if (flag == CALL_FUNCTION) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003525 call_wrapper.BeforeCall(CallSize(adaptor));
Ben Murdoch257744e2011-11-30 15:57:28 +00003526 SetCallKind(t1, call_kind);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003527 Call(adaptor);
Ben Murdoch257744e2011-11-30 15:57:28 +00003528 call_wrapper.AfterCall();
Ben Murdochc7cc0282012-03-05 14:35:55 +00003529 if (!*definitely_mismatches) {
3530 Branch(done);
3531 }
Steve Block6ded16b2010-05-10 14:33:55 +01003532 } else {
Ben Murdoch257744e2011-11-30 15:57:28 +00003533 SetCallKind(t1, call_kind);
Steve Block44f0eee2011-05-26 01:26:41 +01003534 Jump(adaptor, RelocInfo::CODE_TARGET);
Steve Block6ded16b2010-05-10 14:33:55 +01003535 }
3536 bind(&regular_invoke);
3537 }
3538}
3539
Steve Block44f0eee2011-05-26 01:26:41 +01003540
Steve Block6ded16b2010-05-10 14:33:55 +01003541void MacroAssembler::InvokeCode(Register code,
3542 const ParameterCount& expected,
3543 const ParameterCount& actual,
Steve Block44f0eee2011-05-26 01:26:41 +01003544 InvokeFlag flag,
Ben Murdoch257744e2011-11-30 15:57:28 +00003545 const CallWrapper& call_wrapper,
3546 CallKind call_kind) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003547 // You can't call a function without a valid frame.
3548 ASSERT(flag == JUMP_FUNCTION || has_frame());
3549
Steve Block6ded16b2010-05-10 14:33:55 +01003550 Label done;
3551
Ben Murdochc7cc0282012-03-05 14:35:55 +00003552 bool definitely_mismatches = false;
3553 InvokePrologue(expected, actual, Handle<Code>::null(), code,
3554 &done, &definitely_mismatches, flag,
Ben Murdoch257744e2011-11-30 15:57:28 +00003555 call_wrapper, call_kind);
Ben Murdochc7cc0282012-03-05 14:35:55 +00003556 if (!definitely_mismatches) {
3557 if (flag == CALL_FUNCTION) {
3558 call_wrapper.BeforeCall(CallSize(code));
3559 SetCallKind(t1, call_kind);
3560 Call(code);
3561 call_wrapper.AfterCall();
3562 } else {
3563 ASSERT(flag == JUMP_FUNCTION);
3564 SetCallKind(t1, call_kind);
3565 Jump(code);
3566 }
3567 // Continue here if InvokePrologue does handle the invocation due to
3568 // mismatched parameter counts.
3569 bind(&done);
Steve Block6ded16b2010-05-10 14:33:55 +01003570 }
Steve Block6ded16b2010-05-10 14:33:55 +01003571}
3572
3573
3574void MacroAssembler::InvokeCode(Handle<Code> code,
3575 const ParameterCount& expected,
3576 const ParameterCount& actual,
3577 RelocInfo::Mode rmode,
Ben Murdoch257744e2011-11-30 15:57:28 +00003578 InvokeFlag flag,
3579 CallKind call_kind) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003580 // You can't call a function without a valid frame.
3581 ASSERT(flag == JUMP_FUNCTION || has_frame());
3582
Steve Block6ded16b2010-05-10 14:33:55 +01003583 Label done;
3584
Ben Murdochc7cc0282012-03-05 14:35:55 +00003585 bool definitely_mismatches = false;
3586 InvokePrologue(expected, actual, code, no_reg,
3587 &done, &definitely_mismatches, flag,
Ben Murdoch257744e2011-11-30 15:57:28 +00003588 NullCallWrapper(), call_kind);
Ben Murdochc7cc0282012-03-05 14:35:55 +00003589 if (!definitely_mismatches) {
3590 if (flag == CALL_FUNCTION) {
3591 SetCallKind(t1, call_kind);
3592 Call(code, rmode);
3593 } else {
3594 SetCallKind(t1, call_kind);
3595 Jump(code, rmode);
3596 }
3597 // Continue here if InvokePrologue does handle the invocation due to
3598 // mismatched parameter counts.
3599 bind(&done);
Steve Block6ded16b2010-05-10 14:33:55 +01003600 }
Steve Block6ded16b2010-05-10 14:33:55 +01003601}
3602
3603
3604void MacroAssembler::InvokeFunction(Register function,
3605 const ParameterCount& actual,
Steve Block44f0eee2011-05-26 01:26:41 +01003606 InvokeFlag flag,
Ben Murdoch257744e2011-11-30 15:57:28 +00003607 const CallWrapper& call_wrapper,
3608 CallKind call_kind) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003609 // You can't call a function without a valid frame.
3610 ASSERT(flag == JUMP_FUNCTION || has_frame());
3611
Steve Block6ded16b2010-05-10 14:33:55 +01003612 // Contract with called JS functions requires that function is passed in a1.
3613 ASSERT(function.is(a1));
3614 Register expected_reg = a2;
3615 Register code_reg = a3;
3616
3617 lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3618 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3619 lw(expected_reg,
3620 FieldMemOperand(code_reg,
3621 SharedFunctionInfo::kFormalParameterCountOffset));
Steve Block44f0eee2011-05-26 01:26:41 +01003622 sra(expected_reg, expected_reg, kSmiTagSize);
3623 lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
Steve Block6ded16b2010-05-10 14:33:55 +01003624
3625 ParameterCount expected(expected_reg);
Ben Murdoch257744e2011-11-30 15:57:28 +00003626 InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
Steve Block44f0eee2011-05-26 01:26:41 +01003627}
3628
3629
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003630void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
Steve Block44f0eee2011-05-26 01:26:41 +01003631 const ParameterCount& actual,
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003632 InvokeFlag flag,
Ben Murdochc7cc0282012-03-05 14:35:55 +00003633 const CallWrapper& call_wrapper,
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003634 CallKind call_kind) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003635 // You can't call a function without a valid frame.
3636 ASSERT(flag == JUMP_FUNCTION || has_frame());
Steve Block44f0eee2011-05-26 01:26:41 +01003637
 3638  // Get the function and set up the context.
Ben Murdochc7cc0282012-03-05 14:35:55 +00003639 LoadHeapObject(a1, function);
Steve Block44f0eee2011-05-26 01:26:41 +01003640 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3641
Steve Block44f0eee2011-05-26 01:26:41 +01003642 ParameterCount expected(function->shared()->formal_parameter_count());
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003643 // We call indirectly through the code field in the function to
3644 // allow recompilation to take effect without changing any of the
3645 // call sites.
3646 lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
Ben Murdochc7cc0282012-03-05 14:35:55 +00003647 InvokeCode(a3, expected, actual, flag, call_wrapper, call_kind);
Steve Block44f0eee2011-05-26 01:26:41 +01003648}
3649
3650
3651void MacroAssembler::IsObjectJSObjectType(Register heap_object,
3652 Register map,
3653 Register scratch,
3654 Label* fail) {
3655 lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
3656 IsInstanceJSObjectType(map, scratch, fail);
3657}
3658
3659
3660void MacroAssembler::IsInstanceJSObjectType(Register map,
3661 Register scratch,
3662 Label* fail) {
3663 lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003664 Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
3665 Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
Steve Block44f0eee2011-05-26 01:26:41 +01003666}
3667
3668
3669void MacroAssembler::IsObjectJSStringType(Register object,
3670 Register scratch,
3671 Label* fail) {
3672 ASSERT(kNotStringTag != 0);
3673
3674 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3675 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3676 And(scratch, scratch, Operand(kIsNotStringMask));
3677 Branch(fail, ne, scratch, Operand(zero_reg));
Steve Block6ded16b2010-05-10 14:33:55 +01003678}
3679
3680
3681// ---------------------------------------------------------------------------
3682// Support functions.
3683
Steve Block44f0eee2011-05-26 01:26:41 +01003684
3685void MacroAssembler::TryGetFunctionPrototype(Register function,
3686 Register result,
3687 Register scratch,
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003688 Label* miss,
3689 bool miss_on_bound_function) {
Steve Block44f0eee2011-05-26 01:26:41 +01003690 // Check that the receiver isn't a smi.
3691 JumpIfSmi(function, miss);
3692
3693 // Check that the function really is a function. Load map into result reg.
3694 GetObjectType(function, result, scratch);
3695 Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
3696
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003697 if (miss_on_bound_function) {
3698 lw(scratch,
3699 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3700 lw(scratch,
3701 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3702 And(scratch, scratch,
3703 Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
3704 Branch(miss, ne, scratch, Operand(zero_reg));
3705 }
3706
Steve Block44f0eee2011-05-26 01:26:41 +01003707 // Make sure that the function has an instance prototype.
3708 Label non_instance;
3709 lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
3710 And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
3711 Branch(&non_instance, ne, scratch, Operand(zero_reg));
3712
3713 // Get the prototype or initial map from the function.
3714 lw(result,
3715 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3716
3717 // If the prototype or initial map is the hole, don't return it and
3718 // simply miss the cache instead. This will allow us to allocate a
3719 // prototype object on-demand in the runtime system.
3720 LoadRoot(t8, Heap::kTheHoleValueRootIndex);
3721 Branch(miss, eq, result, Operand(t8));
3722
3723 // If the function does not have an initial map, we're done.
3724 Label done;
3725 GetObjectType(result, scratch, scratch);
3726 Branch(&done, ne, scratch, Operand(MAP_TYPE));
3727
3728 // Get the prototype from the initial map.
3729 lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
3730 jmp(&done);
3731
3732 // Non-instance prototype: Fetch prototype from constructor field
3733 // in initial map.
3734 bind(&non_instance);
3735 lw(result, FieldMemOperand(result, Map::kConstructorOffset));
3736
3737 // All done.
3738 bind(&done);
3739}
Steve Block6ded16b2010-05-10 14:33:55 +01003740
3741
Steve Block44f0eee2011-05-26 01:26:41 +01003742void MacroAssembler::GetObjectType(Register object,
3743 Register map,
3744 Register type_reg) {
3745 lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
3746 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3747}
Steve Block6ded16b2010-05-10 14:33:55 +01003748
3749
3750// -----------------------------------------------------------------------------
Ben Murdoch257744e2011-11-30 15:57:28 +00003751// Runtime calls.
Steve Block6ded16b2010-05-10 14:33:55 +01003752
Andrei Popescu31002712010-02-23 13:46:05 +00003753void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
3754 Register r1, const Operand& r2) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003755 ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003756 Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2);
Andrei Popescu31002712010-02-23 13:46:05 +00003757}
3758
3759
Steve Block44f0eee2011-05-26 01:26:41 +01003760void MacroAssembler::TailCallStub(CodeStub* stub) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003761 ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
Steve Block44f0eee2011-05-26 01:26:41 +01003762 Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
Andrei Popescu31002712010-02-23 13:46:05 +00003763}
3764
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003765
Ben Murdoch257744e2011-11-30 15:57:28 +00003766static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
3767 return ref0.address() - ref1.address();
3768}
3769
3770
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003771void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
3772 int stack_space) {
Ben Murdoch257744e2011-11-30 15:57:28 +00003773 ExternalReference next_address =
3774 ExternalReference::handle_scope_next_address();
3775 const int kNextOffset = 0;
3776 const int kLimitOffset = AddressOffset(
3777 ExternalReference::handle_scope_limit_address(),
3778 next_address);
3779 const int kLevelOffset = AddressOffset(
3780 ExternalReference::handle_scope_level_address(),
3781 next_address);
3782
3783 // Allocate HandleScope in callee-save registers.
3784 li(s3, Operand(next_address));
3785 lw(s0, MemOperand(s3, kNextOffset));
3786 lw(s1, MemOperand(s3, kLimitOffset));
3787 lw(s2, MemOperand(s3, kLevelOffset));
3788 Addu(s2, s2, Operand(1));
3789 sw(s2, MemOperand(s3, kLevelOffset));
3790
3791 // The O32 ABI requires us to pass a pointer in a0 where the returned struct
3792 // (4 bytes) will be placed. This is also built into the Simulator.
3793 // Set up the pointer to the returned value (a0). It was allocated in
3794 // EnterExitFrame.
3795 addiu(a0, fp, ExitFrameConstants::kStackSpaceOffset);
3796
3797 // Native call returns to the DirectCEntry stub which redirects to the
3798 // return address pushed on stack (could have moved after GC).
3799 // DirectCEntry stub itself is generated early and never moves.
3800 DirectCEntryStub stub;
3801 stub.GenerateCall(this, function);
3802
3803 // As mentioned above, on MIPS a pointer is returned - we need to dereference
3804 // it to get the actual return value (which is also a pointer).
3805 lw(v0, MemOperand(v0));
3806
3807 Label promote_scheduled_exception;
3808 Label delete_allocated_handles;
3809 Label leave_exit_frame;
3810
 3811  // If the result is non-zero, dereference it to get the result value;
 3812  // otherwise set it to undefined.
3813 Label skip;
3814 LoadRoot(a0, Heap::kUndefinedValueRootIndex);
3815 Branch(&skip, eq, v0, Operand(zero_reg));
3816 lw(a0, MemOperand(v0));
3817 bind(&skip);
3818 mov(v0, a0);
3819
3820 // No more valid handles (the result handle was the last one). Restore
3821 // previous handle scope.
3822 sw(s0, MemOperand(s3, kNextOffset));
3823 if (emit_debug_code()) {
3824 lw(a1, MemOperand(s3, kLevelOffset));
3825 Check(eq, "Unexpected level after return from api call", a1, Operand(s2));
3826 }
3827 Subu(s2, s2, Operand(1));
3828 sw(s2, MemOperand(s3, kLevelOffset));
3829 lw(at, MemOperand(s3, kLimitOffset));
3830 Branch(&delete_allocated_handles, ne, s1, Operand(at));
3831
3832 // Check if the function scheduled an exception.
3833 bind(&leave_exit_frame);
3834 LoadRoot(t0, Heap::kTheHoleValueRootIndex);
3835 li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
3836 lw(t1, MemOperand(at));
3837 Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
3838 li(s0, Operand(stack_space));
3839 LeaveExitFrame(false, s0);
3840 Ret();
3841
3842 bind(&promote_scheduled_exception);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003843 TailCallExternalReference(
3844 ExternalReference(Runtime::kPromoteScheduledException, isolate()),
3845 0,
3846 1);
Ben Murdoch257744e2011-11-30 15:57:28 +00003847
3848 // HandleScope limit has changed. Delete allocated extensions.
3849 bind(&delete_allocated_handles);
3850 sw(s1, MemOperand(s3, kLimitOffset));
3851 mov(s0, v0);
3852 mov(a0, v0);
3853 PrepareCallCFunction(1, s1);
3854 li(a0, Operand(ExternalReference::isolate_address()));
3855 CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
3856 1);
3857 mov(v0, s0);
3858 jmp(&leave_exit_frame);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003859}
Ben Murdoch257744e2011-11-30 15:57:28 +00003860
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003861
3862bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
3863 if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
3864 return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
Ben Murdoch257744e2011-11-30 15:57:28 +00003865}
3866
Andrei Popescu31002712010-02-23 13:46:05 +00003867
Steve Block6ded16b2010-05-10 14:33:55 +01003868void MacroAssembler::IllegalOperation(int num_arguments) {
3869 if (num_arguments > 0) {
3870 addiu(sp, sp, num_arguments * kPointerSize);
3871 }
3872 LoadRoot(v0, Heap::kUndefinedValueRootIndex);
3873}
3874
3875
Steve Block44f0eee2011-05-26 01:26:41 +01003876void MacroAssembler::IndexFromHash(Register hash,
3877 Register index) {
 3878  // If the hash field contains an array index, pick it out. The assert checks
 3879  // that the constants for the maximum number of digits for an array index
 3880  // cached in the hash field and the number of bits reserved for it do not
 3881  // conflict.
3882 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
3883 (1 << String::kArrayIndexValueBits));
3884 // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
3885 // the low kHashShift bits.
3886 STATIC_ASSERT(kSmiTag == 0);
3887 Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
3888 sll(index, hash, kSmiTagSize);
3889}
3890
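// Rough C sketch of the extraction above (illustrative only): the cached
// array index sits in the hash field starting at String::kHashShift, and the
// result is re-tagged as a smi.
//
//   uint32_t index = (hash_field >> String::kHashShift) &
//                    ((1 << String::kArrayIndexValueBits) - 1);
//   uint32_t smi_index = index << kSmiTagSize;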
3891
3892void MacroAssembler::ObjectToDoubleFPURegister(Register object,
3893 FPURegister result,
3894 Register scratch1,
3895 Register scratch2,
3896 Register heap_number_map,
3897 Label* not_number,
3898 ObjectToDoubleFlags flags) {
3899 Label done;
3900 if ((flags & OBJECT_NOT_SMI) == 0) {
3901 Label not_smi;
3902 JumpIfNotSmi(object, &not_smi);
3903 // Remove smi tag and convert to double.
3904 sra(scratch1, object, kSmiTagSize);
3905 mtc1(scratch1, result);
3906 cvt_d_w(result, result);
3907 Branch(&done);
3908 bind(&not_smi);
3909 }
3910 // Check for heap number and load double value from it.
3911 lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
3912 Branch(not_number, ne, scratch1, Operand(heap_number_map));
3913
3914 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
3915 // If exponent is all ones the number is either a NaN or +/-Infinity.
3916 Register exponent = scratch1;
3917 Register mask_reg = scratch2;
3918 lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
3919 li(mask_reg, HeapNumber::kExponentMask);
3920
3921 And(exponent, exponent, mask_reg);
3922 Branch(not_number, eq, exponent, Operand(mask_reg));
3923 }
3924 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
3925 bind(&done);
3926}
3927
3928
Steve Block44f0eee2011-05-26 01:26:41 +01003929void MacroAssembler::SmiToDoubleFPURegister(Register smi,
3930 FPURegister value,
3931 Register scratch1) {
3932 sra(scratch1, smi, kSmiTagSize);
3933 mtc1(scratch1, value);
3934 cvt_d_w(value, value);
3935}
3936
3937
Ben Murdoch257744e2011-11-30 15:57:28 +00003938void MacroAssembler::AdduAndCheckForOverflow(Register dst,
3939 Register left,
3940 Register right,
3941 Register overflow_dst,
3942 Register scratch) {
3943 ASSERT(!dst.is(overflow_dst));
3944 ASSERT(!dst.is(scratch));
3945 ASSERT(!overflow_dst.is(scratch));
3946 ASSERT(!overflow_dst.is(left));
3947 ASSERT(!overflow_dst.is(right));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003948
3949 if (left.is(right) && dst.is(left)) {
3950 ASSERT(!dst.is(t9));
3951 ASSERT(!scratch.is(t9));
3952 ASSERT(!left.is(t9));
3953 ASSERT(!right.is(t9));
3954 ASSERT(!overflow_dst.is(t9));
3955 mov(t9, right);
3956 right = t9;
3957 }
Ben Murdoch257744e2011-11-30 15:57:28 +00003958
Ben Murdoch257744e2011-11-30 15:57:28 +00003959 if (dst.is(left)) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003960 mov(scratch, left); // Preserve left.
3961 addu(dst, left, right); // Left is overwritten.
3962 xor_(scratch, dst, scratch); // Original left.
3963 xor_(overflow_dst, dst, right);
3964 and_(overflow_dst, overflow_dst, scratch);
Ben Murdoch257744e2011-11-30 15:57:28 +00003965 } else if (dst.is(right)) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003966 mov(scratch, right); // Preserve right.
3967 addu(dst, left, right); // Right is overwritten.
3968 xor_(scratch, dst, scratch); // Original right.
3969 xor_(overflow_dst, dst, left);
3970 and_(overflow_dst, overflow_dst, scratch);
Ben Murdoch257744e2011-11-30 15:57:28 +00003971 } else {
3972 addu(dst, left, right);
3973 xor_(overflow_dst, dst, left);
3974 xor_(scratch, dst, right);
3975 and_(overflow_dst, scratch, overflow_dst);
3976 }
3977}
3978
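// The sign-bit trick used above, as a rough C sketch (illustrative only, and
// glossing over the fact that signed overflow is undefined in standard C):
// addition overflows exactly when both operands have the same sign and the
// sum's sign differs, so the value below is negative iff overflow occurred.
//
//   int32_t sum = left + right;
//   int32_t overflow = (sum ^ left) & (sum ^ right);  // sign bit set on overflow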
3979
3980void MacroAssembler::SubuAndCheckForOverflow(Register dst,
3981 Register left,
3982 Register right,
3983 Register overflow_dst,
3984 Register scratch) {
3985 ASSERT(!dst.is(overflow_dst));
3986 ASSERT(!dst.is(scratch));
3987 ASSERT(!overflow_dst.is(scratch));
3988 ASSERT(!overflow_dst.is(left));
3989 ASSERT(!overflow_dst.is(right));
Ben Murdoch257744e2011-11-30 15:57:28 +00003990 ASSERT(!scratch.is(left));
3991 ASSERT(!scratch.is(right));
3992
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003993  // This happens with some Crankshaft code. Since Subu works fine if
3994 // left == right, let's not make that restriction here.
3995 if (left.is(right)) {
3996 mov(dst, zero_reg);
3997 mov(overflow_dst, zero_reg);
3998 return;
3999 }
4000
Ben Murdoch257744e2011-11-30 15:57:28 +00004001 if (dst.is(left)) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004002 mov(scratch, left); // Preserve left.
4003 subu(dst, left, right); // Left is overwritten.
4004 xor_(overflow_dst, dst, scratch); // scratch is original left.
4005 xor_(scratch, scratch, right); // scratch is original left.
4006 and_(overflow_dst, scratch, overflow_dst);
Ben Murdoch257744e2011-11-30 15:57:28 +00004007 } else if (dst.is(right)) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004008 mov(scratch, right); // Preserve right.
4009 subu(dst, left, right); // Right is overwritten.
4010 xor_(overflow_dst, dst, left);
4011 xor_(scratch, left, scratch); // Original right.
4012 and_(overflow_dst, scratch, overflow_dst);
Ben Murdoch257744e2011-11-30 15:57:28 +00004013 } else {
4014 subu(dst, left, right);
4015 xor_(overflow_dst, dst, left);
4016 xor_(scratch, left, right);
4017 and_(overflow_dst, scratch, overflow_dst);
4018 }
4019}
4020
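// The matching sketch for subtraction (illustrative only, same caveat about
// signed overflow in C): subtraction overflows exactly when the operands have
// different signs and the difference's sign differs from the minuend's.
//
//   int32_t diff = left - right;
//   int32_t overflow = (diff ^ left) & (left ^ right);  // sign bit set on overflow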
4021
Steve Block44f0eee2011-05-26 01:26:41 +01004022void MacroAssembler::CallRuntime(const Runtime::Function* f,
4023 int num_arguments) {
Steve Block6ded16b2010-05-10 14:33:55 +01004024 // All parameters are on the stack. v0 has the return value after call.
4025
4026 // If the expected number of arguments of the runtime function is
4027 // constant, we check that the actual number of arguments match the
4028 // expectation.
4029 if (f->nargs >= 0 && f->nargs != num_arguments) {
4030 IllegalOperation(num_arguments);
4031 return;
4032 }
4033
4034 // TODO(1236192): Most runtime routines don't need the number of
4035 // arguments passed in because it is constant. At some point we
4036 // should remove this need and make the runtime routine entry code
4037 // smarter.
4038 li(a0, num_arguments);
Steve Block44f0eee2011-05-26 01:26:41 +01004039 li(a1, Operand(ExternalReference(f, isolate())));
Steve Block6ded16b2010-05-10 14:33:55 +01004040 CEntryStub stub(1);
4041 CallStub(&stub);
Andrei Popescu31002712010-02-23 13:46:05 +00004042}
4043
4044
Steve Block44f0eee2011-05-26 01:26:41 +01004045void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
4046 const Runtime::Function* function = Runtime::FunctionForId(id);
4047 li(a0, Operand(function->nargs));
4048 li(a1, Operand(ExternalReference(function, isolate())));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004049 CEntryStub stub(1, kSaveFPRegs);
Steve Block44f0eee2011-05-26 01:26:41 +01004050 CallStub(&stub);
4051}
4052
4053
Andrei Popescu31002712010-02-23 13:46:05 +00004054void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
Steve Block6ded16b2010-05-10 14:33:55 +01004055 CallRuntime(Runtime::FunctionForId(fid), num_arguments);
4056}
4057
4058
Steve Block44f0eee2011-05-26 01:26:41 +01004059void MacroAssembler::CallExternalReference(const ExternalReference& ext,
4060 int num_arguments) {
4061 li(a0, Operand(num_arguments));
4062 li(a1, Operand(ext));
4063
4064 CEntryStub stub(1);
4065 CallStub(&stub);
4066}
4067
4068
Steve Block6ded16b2010-05-10 14:33:55 +01004069void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
4070 int num_arguments,
4071 int result_size) {
Steve Block44f0eee2011-05-26 01:26:41 +01004072 // TODO(1236192): Most runtime routines don't need the number of
4073 // arguments passed in because it is constant. At some point we
4074 // should remove this need and make the runtime routine entry code
4075 // smarter.
4076 li(a0, Operand(num_arguments));
4077 JumpToExternalReference(ext);
Andrei Popescu31002712010-02-23 13:46:05 +00004078}
4079
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004080
Steve Block6ded16b2010-05-10 14:33:55 +01004081void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
Andrei Popescu31002712010-02-23 13:46:05 +00004082 int num_arguments,
4083 int result_size) {
Steve Block44f0eee2011-05-26 01:26:41 +01004084 TailCallExternalReference(ExternalReference(fid, isolate()),
4085 num_arguments,
4086 result_size);
Andrei Popescu31002712010-02-23 13:46:05 +00004087}
4088
4089
Steve Block6ded16b2010-05-10 14:33:55 +01004090void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
Steve Block44f0eee2011-05-26 01:26:41 +01004091 li(a1, Operand(builtin));
4092 CEntryStub stub(1);
4093 Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
Andrei Popescu31002712010-02-23 13:46:05 +00004094}
4095
4096
4097void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
Ben Murdoch257744e2011-11-30 15:57:28 +00004098 InvokeFlag flag,
4099 const CallWrapper& call_wrapper) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004100 // You can't call a builtin without a valid frame.
4101 ASSERT(flag == JUMP_FUNCTION || has_frame());
4102
Steve Block44f0eee2011-05-26 01:26:41 +01004103 GetBuiltinEntry(t9, id);
Ben Murdoch257744e2011-11-30 15:57:28 +00004104 if (flag == CALL_FUNCTION) {
4105 call_wrapper.BeforeCall(CallSize(t9));
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004106 SetCallKind(t1, CALL_AS_METHOD);
Steve Block44f0eee2011-05-26 01:26:41 +01004107 Call(t9);
Ben Murdoch257744e2011-11-30 15:57:28 +00004108 call_wrapper.AfterCall();
Steve Block44f0eee2011-05-26 01:26:41 +01004109 } else {
Ben Murdoch257744e2011-11-30 15:57:28 +00004110 ASSERT(flag == JUMP_FUNCTION);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004111 SetCallKind(t1, CALL_AS_METHOD);
Steve Block44f0eee2011-05-26 01:26:41 +01004112 Jump(t9);
4113 }
4114}
4115
4116
4117void MacroAssembler::GetBuiltinFunction(Register target,
4118 Builtins::JavaScript id) {
4119 // Load the builtins object into target register.
4120 lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4121 lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
4122 // Load the JavaScript builtin function from the builtins object.
4123 lw(target, FieldMemOperand(target,
4124 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
Andrei Popescu31002712010-02-23 13:46:05 +00004125}
4126
4127
4128void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
Steve Block44f0eee2011-05-26 01:26:41 +01004129 ASSERT(!target.is(a1));
4130 GetBuiltinFunction(a1, id);
4131 // Load the code entry point from the builtins object.
4132 lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
Andrei Popescu31002712010-02-23 13:46:05 +00004133}
4134
4135
4136void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4137 Register scratch1, Register scratch2) {
Steve Block44f0eee2011-05-26 01:26:41 +01004138 if (FLAG_native_code_counters && counter->Enabled()) {
4139 li(scratch1, Operand(value));
4140 li(scratch2, Operand(ExternalReference(counter)));
4141 sw(scratch1, MemOperand(scratch2));
4142 }
Andrei Popescu31002712010-02-23 13:46:05 +00004143}
4144
4145
4146void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4147 Register scratch1, Register scratch2) {
Steve Block44f0eee2011-05-26 01:26:41 +01004148 ASSERT(value > 0);
4149 if (FLAG_native_code_counters && counter->Enabled()) {
4150 li(scratch2, Operand(ExternalReference(counter)));
4151 lw(scratch1, MemOperand(scratch2));
4152 Addu(scratch1, scratch1, Operand(value));
4153 sw(scratch1, MemOperand(scratch2));
4154 }
Andrei Popescu31002712010-02-23 13:46:05 +00004155}
4156
4157
4158void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4159 Register scratch1, Register scratch2) {
Steve Block44f0eee2011-05-26 01:26:41 +01004160 ASSERT(value > 0);
4161 if (FLAG_native_code_counters && counter->Enabled()) {
4162 li(scratch2, Operand(ExternalReference(counter)));
4163 lw(scratch1, MemOperand(scratch2));
4164 Subu(scratch1, scratch1, Operand(value));
4165 sw(scratch1, MemOperand(scratch2));
4166 }
Andrei Popescu31002712010-02-23 13:46:05 +00004167}
4168
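// Illustrative usage sketch (not part of the original source; the counter
// name and scratch registers are assumptions): the counter helpers emit code
// only when native code counters are enabled.
//
//   IncrementCounter(isolate()->counters()->string_add_native(), 1, a2, a3);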
4169
Steve Block6ded16b2010-05-10 14:33:55 +01004170// -----------------------------------------------------------------------------
Ben Murdoch257744e2011-11-30 15:57:28 +00004171// Debugging.
Andrei Popescu31002712010-02-23 13:46:05 +00004172
4173void MacroAssembler::Assert(Condition cc, const char* msg,
4174 Register rs, Operand rt) {
Ben Murdoch257744e2011-11-30 15:57:28 +00004175 if (emit_debug_code())
Steve Block44f0eee2011-05-26 01:26:41 +01004176 Check(cc, msg, rs, rt);
4177}
4178
4179
4180void MacroAssembler::AssertRegisterIsRoot(Register reg,
4181 Heap::RootListIndex index) {
Ben Murdoch257744e2011-11-30 15:57:28 +00004182 if (emit_debug_code()) {
Steve Block44f0eee2011-05-26 01:26:41 +01004183 LoadRoot(at, index);
4184 Check(eq, "Register did not match expected root", reg, Operand(at));
4185 }
4186}
4187
4188
4189void MacroAssembler::AssertFastElements(Register elements) {
Ben Murdoch257744e2011-11-30 15:57:28 +00004190 if (emit_debug_code()) {
Steve Block44f0eee2011-05-26 01:26:41 +01004191 ASSERT(!elements.is(at));
4192 Label ok;
Ben Murdoch257744e2011-11-30 15:57:28 +00004193 push(elements);
Steve Block44f0eee2011-05-26 01:26:41 +01004194 lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
4195 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4196 Branch(&ok, eq, elements, Operand(at));
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004197 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4198 Branch(&ok, eq, elements, Operand(at));
Steve Block44f0eee2011-05-26 01:26:41 +01004199 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4200 Branch(&ok, eq, elements, Operand(at));
4201 Abort("JSObject with fast elements map has slow elements");
4202 bind(&ok);
Ben Murdoch257744e2011-11-30 15:57:28 +00004203 pop(elements);
Steve Block44f0eee2011-05-26 01:26:41 +01004204 }
Andrei Popescu31002712010-02-23 13:46:05 +00004205}
4206
4207
4208void MacroAssembler::Check(Condition cc, const char* msg,
4209 Register rs, Operand rt) {
Steve Block44f0eee2011-05-26 01:26:41 +01004210 Label L;
4211 Branch(&L, cc, rs, rt);
4212 Abort(msg);
Ben Murdoch257744e2011-11-30 15:57:28 +00004213 // Will not return here.
Steve Block44f0eee2011-05-26 01:26:41 +01004214 bind(&L);
Andrei Popescu31002712010-02-23 13:46:05 +00004215}
4216
4217
4218void MacroAssembler::Abort(const char* msg) {
Steve Block44f0eee2011-05-26 01:26:41 +01004219 Label abort_start;
4220 bind(&abort_start);
4221 // We want to pass the msg string like a smi to avoid GC
 4222  // problems; however, msg is not guaranteed to be aligned
4223 // properly. Instead, we pass an aligned pointer that is
4224 // a proper v8 smi, but also pass the alignment difference
4225 // from the real pointer as a smi.
4226 intptr_t p1 = reinterpret_cast<intptr_t>(msg);
4227 intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
4228 ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
4229#ifdef DEBUG
4230 if (msg != NULL) {
4231 RecordComment("Abort message: ");
4232 RecordComment(msg);
4233 }
4234#endif
Steve Block44f0eee2011-05-26 01:26:41 +01004235
4236 li(a0, Operand(p0));
Ben Murdoch257744e2011-11-30 15:57:28 +00004237 push(a0);
Steve Block44f0eee2011-05-26 01:26:41 +01004238 li(a0, Operand(Smi::FromInt(p1 - p0)));
Ben Murdoch257744e2011-11-30 15:57:28 +00004239 push(a0);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004240 // Disable stub call restrictions to always allow calls to abort.
4241 if (!has_frame_) {
4242 // We don't actually want to generate a pile of code for this, so just
4243 // claim there is a stack frame, without generating one.
4244 FrameScope scope(this, StackFrame::NONE);
4245 CallRuntime(Runtime::kAbort, 2);
4246 } else {
4247 CallRuntime(Runtime::kAbort, 2);
4248 }
Ben Murdoch257744e2011-11-30 15:57:28 +00004249 // Will not return here.
Steve Block44f0eee2011-05-26 01:26:41 +01004250 if (is_trampoline_pool_blocked()) {
4251 // If the calling code cares about the exact number of
4252 // instructions generated, we insert padding here to keep the size
4253 // of the Abort macro constant.
4254 // Currently in debug mode with debug_code enabled the number of
4255 // generated instructions is 14, so we use this as a maximum value.
4256 static const int kExpectedAbortInstructions = 14;
4257 int abort_instructions = InstructionsGeneratedSince(&abort_start);
4258 ASSERT(abort_instructions <= kExpectedAbortInstructions);
4259 while (abort_instructions++ < kExpectedAbortInstructions) {
4260 nop();
4261 }
4262 }
4263}
4264
4265
4266void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4267 if (context_chain_length > 0) {
4268 // Move up the chain of contexts to the context containing the slot.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004269 lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
Steve Block44f0eee2011-05-26 01:26:41 +01004270 for (int i = 1; i < context_chain_length; i++) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004271 lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
Steve Block44f0eee2011-05-26 01:26:41 +01004272 }
Ben Murdoch257744e2011-11-30 15:57:28 +00004273 } else {
4274 // Slot is in the current function context. Move it into the
4275 // destination register in case we store into it (the write barrier
4276 // cannot be allowed to destroy the context in esi).
4277 Move(dst, cp);
4278 }
Steve Block44f0eee2011-05-26 01:26:41 +01004279}
4280
4281
4282void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4283 // Load the global or builtins object from the current context.
4284 lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4285 // Load the global context from the global or builtins object.
4286 lw(function, FieldMemOperand(function,
4287 GlobalObject::kGlobalContextOffset));
4288 // Load the function from the global context.
4289 lw(function, MemOperand(function, Context::SlotOffset(index)));
4290}
4291
4292
4293void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4294 Register map,
4295 Register scratch) {
4296 // Load the initial map. The global functions all have initial maps.
4297 lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
Ben Murdoch257744e2011-11-30 15:57:28 +00004298 if (emit_debug_code()) {
Steve Block44f0eee2011-05-26 01:26:41 +01004299 Label ok, fail;
Ben Murdoch257744e2011-11-30 15:57:28 +00004300 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
Steve Block44f0eee2011-05-26 01:26:41 +01004301 Branch(&ok);
4302 bind(&fail);
4303 Abort("Global functions must have initial map");
4304 bind(&ok);
4305 }
Andrei Popescu31002712010-02-23 13:46:05 +00004306}
4307
Steve Block6ded16b2010-05-10 14:33:55 +01004308
4309void MacroAssembler::EnterFrame(StackFrame::Type type) {
4310 addiu(sp, sp, -5 * kPointerSize);
Steve Block44f0eee2011-05-26 01:26:41 +01004311 li(t8, Operand(Smi::FromInt(type)));
4312 li(t9, Operand(CodeObject()));
Steve Block6ded16b2010-05-10 14:33:55 +01004313 sw(ra, MemOperand(sp, 4 * kPointerSize));
4314 sw(fp, MemOperand(sp, 3 * kPointerSize));
4315 sw(cp, MemOperand(sp, 2 * kPointerSize));
Steve Block44f0eee2011-05-26 01:26:41 +01004316 sw(t8, MemOperand(sp, 1 * kPointerSize));
4317 sw(t9, MemOperand(sp, 0 * kPointerSize));
Steve Block6ded16b2010-05-10 14:33:55 +01004318 addiu(fp, sp, 3 * kPointerSize);
4319}
4320
4321
4322void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4323 mov(sp, fp);
4324 lw(fp, MemOperand(sp, 0 * kPointerSize));
4325 lw(ra, MemOperand(sp, 1 * kPointerSize));
4326 addiu(sp, sp, 2 * kPointerSize);
4327}
4328
4329
Ben Murdoch257744e2011-11-30 15:57:28 +00004330void MacroAssembler::EnterExitFrame(bool save_doubles,
4331 int stack_space) {
Ben Murdochc7cc0282012-03-05 14:35:55 +00004332 // Set up the frame structure on the stack.
Ben Murdoch257744e2011-11-30 15:57:28 +00004333 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
4334 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
4335 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
Steve Block6ded16b2010-05-10 14:33:55 +01004336
Ben Murdoch257744e2011-11-30 15:57:28 +00004337 // This is how the stack will look:
4338 // fp + 2 (==kCallerSPDisplacement) - old stack's end
4339 // [fp + 1 (==kCallerPCOffset)] - saved old ra
4340 // [fp + 0 (==kCallerFPOffset)] - saved old fp
4341 // [fp - 1 (==kSPOffset)] - sp of the called function
4342 // [fp - 2 (==kCodeOffset)] - CodeObject
4343 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
4344 // new stack (will contain saved ra)
Steve Block6ded16b2010-05-10 14:33:55 +01004345
4346 // Save registers.
Ben Murdoch257744e2011-11-30 15:57:28 +00004347 addiu(sp, sp, -4 * kPointerSize);
4348 sw(ra, MemOperand(sp, 3 * kPointerSize));
4349 sw(fp, MemOperand(sp, 2 * kPointerSize));
Ben Murdochc7cc0282012-03-05 14:35:55 +00004350 addiu(fp, sp, 2 * kPointerSize); // Set up new frame pointer.
Steve Block6ded16b2010-05-10 14:33:55 +01004351
Ben Murdoch257744e2011-11-30 15:57:28 +00004352 if (emit_debug_code()) {
4353 sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
4354 }
4355
4356 li(t8, Operand(CodeObject())); // Accessed from ExitFrame::code_slot.
4357 sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
Steve Block6ded16b2010-05-10 14:33:55 +01004358
4359 // Save the frame pointer and the context in top.
Ben Murdoch589d6972011-11-30 16:04:58 +00004360 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
Steve Block44f0eee2011-05-26 01:26:41 +01004361 sw(fp, MemOperand(t8));
Ben Murdoch589d6972011-11-30 16:04:58 +00004362 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
Steve Block44f0eee2011-05-26 01:26:41 +01004363 sw(cp, MemOperand(t8));
Steve Block6ded16b2010-05-10 14:33:55 +01004364
Ben Murdoch257744e2011-11-30 15:57:28 +00004365 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
Steve Block44f0eee2011-05-26 01:26:41 +01004366 if (save_doubles) {
Ben Murdoch257744e2011-11-30 15:57:28 +00004367    // The stack must be aligned to 0 modulo 8 for stores with sdc1.
Steve Block44f0eee2011-05-26 01:26:41 +01004368 ASSERT(kDoubleSize == frame_alignment);
Ben Murdoch257744e2011-11-30 15:57:28 +00004369 if (frame_alignment > 0) {
4370 ASSERT(IsPowerOf2(frame_alignment));
4371 And(sp, sp, Operand(-frame_alignment)); // Align stack.
4372 }
4373 int space = FPURegister::kNumRegisters * kDoubleSize;
Steve Block44f0eee2011-05-26 01:26:41 +01004374 Subu(sp, sp, Operand(space));
4375 // Remember: we only need to save every 2nd double FPU value.
4376 for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
4377 FPURegister reg = FPURegister::from_code(i);
Ben Murdoch257744e2011-11-30 15:57:28 +00004378 sdc1(reg, MemOperand(sp, i * kDoubleSize));
Steve Block44f0eee2011-05-26 01:26:41 +01004379 }
Steve Block44f0eee2011-05-26 01:26:41 +01004380 }
Ben Murdoch257744e2011-11-30 15:57:28 +00004381
 4382  // Reserve space for the return address, stack space and an optional slot
4383 // (used by the DirectCEntryStub to hold the return value if a struct is
4384 // returned) and align the frame preparing for calling the runtime function.
4385 ASSERT(stack_space >= 0);
4386 Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
4387 if (frame_alignment > 0) {
4388 ASSERT(IsPowerOf2(frame_alignment));
4389 And(sp, sp, Operand(-frame_alignment)); // Align stack.
4390 }
4391
4392 // Set the exit frame sp value to point just before the return address
4393 // location.
4394 addiu(at, sp, kPointerSize);
4395 sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
Steve Block6ded16b2010-05-10 14:33:55 +01004396}
4397
4398
Ben Murdoch257744e2011-11-30 15:57:28 +00004399void MacroAssembler::LeaveExitFrame(bool save_doubles,
4400 Register argument_count) {
Steve Block44f0eee2011-05-26 01:26:41 +01004401 // Optionally restore all double registers.
4402 if (save_doubles) {
Steve Block44f0eee2011-05-26 01:26:41 +01004403 // Remember: we only need to restore every 2nd double FPU value.
Ben Murdoch257744e2011-11-30 15:57:28 +00004404 lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
Steve Block44f0eee2011-05-26 01:26:41 +01004405 for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
4406 FPURegister reg = FPURegister::from_code(i);
Ben Murdoch257744e2011-11-30 15:57:28 +00004407 ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
Steve Block44f0eee2011-05-26 01:26:41 +01004408 }
4409 }
4410
Steve Block6ded16b2010-05-10 14:33:55 +01004411 // Clear top frame.
Ben Murdoch589d6972011-11-30 16:04:58 +00004412 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
Steve Block44f0eee2011-05-26 01:26:41 +01004413 sw(zero_reg, MemOperand(t8));
Steve Block6ded16b2010-05-10 14:33:55 +01004414
4415 // Restore current context from top and clear it in debug mode.
Ben Murdoch589d6972011-11-30 16:04:58 +00004416 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
Steve Block44f0eee2011-05-26 01:26:41 +01004417 lw(cp, MemOperand(t8));
Steve Block6ded16b2010-05-10 14:33:55 +01004418#ifdef DEBUG
Steve Block44f0eee2011-05-26 01:26:41 +01004419 sw(a3, MemOperand(t8));
Steve Block6ded16b2010-05-10 14:33:55 +01004420#endif
4421
4422 // Pop the arguments, restore registers, and return.
4423 mov(sp, fp); // Respect ABI stack constraint.
Ben Murdoch257744e2011-11-30 15:57:28 +00004424 lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
4425 lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
4426 addiu(sp, sp, 8);
4427 if (argument_count.is_valid()) {
4428 sll(t8, argument_count, kPointerSizeLog2);
4429 addu(sp, sp, t8);
4430 }
Steve Block6ded16b2010-05-10 14:33:55 +01004431}
4432
4433
Steve Block44f0eee2011-05-26 01:26:41 +01004434void MacroAssembler::InitializeNewString(Register string,
4435 Register length,
4436 Heap::RootListIndex map_index,
4437 Register scratch1,
4438 Register scratch2) {
4439 sll(scratch1, length, kSmiTagSize);
4440 LoadRoot(scratch2, map_index);
4441 sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
4442 li(scratch1, Operand(String::kEmptyHashField));
4443 sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
4444 sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
4445}
4446
4447
4448int MacroAssembler::ActivationFrameAlignment() {
4449#if defined(V8_HOST_ARCH_MIPS)
4450 // Running on the real platform. Use the alignment as mandated by the local
4451 // environment.
4452 // Note: This will break if we ever start generating snapshots on one Mips
4453 // platform for another Mips platform with a different alignment.
4454 return OS::ActivationFrameAlignment();
4455#else // defined(V8_HOST_ARCH_MIPS)
4456 // If we are using the simulator then we should always align to the expected
4457 // alignment. As the simulator is used to generate snapshots we do not know
4458 // if the target platform will need alignment, so this is controlled from a
4459 // flag.
4460 return FLAG_sim_stack_alignment;
4461#endif // defined(V8_HOST_ARCH_MIPS)
4462}
4463
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00004464
Ben Murdoch257744e2011-11-30 15:57:28 +00004465void MacroAssembler::AssertStackIsAligned() {
4466 if (emit_debug_code()) {
4467 const int frame_alignment = ActivationFrameAlignment();
4468 const int frame_alignment_mask = frame_alignment - 1;
Steve Block44f0eee2011-05-26 01:26:41 +01004469
Ben Murdoch257744e2011-11-30 15:57:28 +00004470 if (frame_alignment > kPointerSize) {
4471 Label alignment_as_expected;
4472 ASSERT(IsPowerOf2(frame_alignment));
4473 andi(at, sp, frame_alignment_mask);
4474 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
4475 // Don't use Check here, as it will call Runtime_Abort, possibly re-entering here.
4476 stop("Unexpected stack alignment");
4477 bind(&alignment_as_expected);
4478 }
Steve Block6ded16b2010-05-10 14:33:55 +01004479 }
Steve Block6ded16b2010-05-10 14:33:55 +01004480}
4481
Steve Block44f0eee2011-05-26 01:26:41 +01004482
Steve Block44f0eee2011-05-26 01:26:41 +01004483void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
4484 Register reg,
4485 Register scratch,
4486 Label* not_power_of_two_or_zero) {
4487 Subu(scratch, reg, Operand(1));
4488 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
4489 scratch, Operand(zero_reg));
4490 and_(at, scratch, reg); // In the delay slot.
4491 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
4492}
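// Illustrative walk-through of the check above (example values only):
//   reg == 8: scratch == 7, 8 & 7 == 0 -> fall through (power of two).
//   reg == 6: scratch == 5, 6 & 5 == 4 -> jump (not a power of two).
//   reg == 0: scratch == -1, so the signed 'lt' branch jumps (zero).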
4493
4494
4495void MacroAssembler::JumpIfNotBothSmi(Register reg1,
4496 Register reg2,
4497 Label* on_not_both_smi) {
4498 STATIC_ASSERT(kSmiTag == 0);
4499 ASSERT_EQ(1, kSmiTagMask);
4500 or_(at, reg1, reg2);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004501 JumpIfNotSmi(at, on_not_both_smi);
Steve Block44f0eee2011-05-26 01:26:41 +01004502}
4503
4504
4505void MacroAssembler::JumpIfEitherSmi(Register reg1,
4506 Register reg2,
4507 Label* on_either_smi) {
4508 STATIC_ASSERT(kSmiTag == 0);
4509 ASSERT_EQ(1, kSmiTagMask);
4510 // Both Smi tags must be 1 (not Smi).
4511 and_(at, reg1, reg2);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004512 JumpIfSmi(at, on_either_smi);
Steve Block44f0eee2011-05-26 01:26:41 +01004513}
4514
4515
4516void MacroAssembler::AbortIfSmi(Register object) {
4517 STATIC_ASSERT(kSmiTag == 0);
4518 andi(at, object, kSmiTagMask);
4519 Assert(ne, "Operand is a smi", at, Operand(zero_reg));
4520}
4521
4522
4523void MacroAssembler::AbortIfNotSmi(Register object) {
4524 STATIC_ASSERT(kSmiTag == 0);
4525 andi(at, object, kSmiTagMask);
4526 Assert(eq, "Operand is not a smi", at, Operand(zero_reg));
4527}
4528
4529
Ben Murdoch257744e2011-11-30 15:57:28 +00004530void MacroAssembler::AbortIfNotString(Register object) {
4531 STATIC_ASSERT(kSmiTag == 0);
4532 And(t0, object, Operand(kSmiTagMask));
4533 Assert(ne, "Operand is not a string", t0, Operand(zero_reg));
4534 push(object);
4535 lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
4536 lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
4537 Assert(lo, "Operand is not a string", object, Operand(FIRST_NONSTRING_TYPE));
4538 pop(object);
4539}
4540
4541
Steve Block44f0eee2011-05-26 01:26:41 +01004542void MacroAssembler::AbortIfNotRootValue(Register src,
4543 Heap::RootListIndex root_value_index,
4544 const char* message) {
4545 ASSERT(!src.is(at));
4546 LoadRoot(at, root_value_index);
4547 Assert(eq, message, src, Operand(at));
4548}
4549
4550
4551void MacroAssembler::JumpIfNotHeapNumber(Register object,
4552 Register heap_number_map,
4553 Register scratch,
4554 Label* on_not_heap_number) {
4555 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4556 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
4557 Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
4558}
4559
4560
4561void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
4562 Register first,
4563 Register second,
4564 Register scratch1,
4565 Register scratch2,
4566 Label* failure) {
4567 // Test that both first and second are sequential ASCII strings.
4568 // Assume that they are non-smis.
4569 lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
4570 lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
4571 lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
4572 lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
4573
4574 JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
4575 scratch2,
4576 scratch1,
4577 scratch2,
4578 failure);
4579}
4580
4581
4582void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
4583 Register second,
4584 Register scratch1,
4585 Register scratch2,
4586 Label* failure) {
4587 // Check that neither is a smi.
4588 STATIC_ASSERT(kSmiTag == 0);
4589 And(scratch1, first, Operand(second));
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004590 JumpIfSmi(scratch1, failure);
Steve Block44f0eee2011-05-26 01:26:41 +01004591 JumpIfNonSmisNotBothSequentialAsciiStrings(first,
4592 second,
4593 scratch1,
4594 scratch2,
4595 failure);
4596}
4597
4598
4599void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
4600 Register first,
4601 Register second,
4602 Register scratch1,
4603 Register scratch2,
4604 Label* failure) {
4605 int kFlatAsciiStringMask =
4606 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
4607 int kFlatAsciiStringTag = ASCII_STRING_TYPE;
4608 ASSERT(kFlatAsciiStringTag <= 0xffff); // Ensure this fits in a 16-bit immediate.
4609 andi(scratch1, first, kFlatAsciiStringMask);
4610 Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
4611 andi(scratch2, second, kFlatAsciiStringMask);
4612 Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
4613}
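// Illustrative bit pattern, using kIsNotStringMask == 0x80 and
// kStringEncodingMask == 0x04 as asserted elsewhere in this file and assuming
// kStringRepresentationMask == 0x03: kFlatAsciiStringMask == 0x87 and
// kFlatAsciiStringTag == ASCII_STRING_TYPE == 0x04, so only an instance type
// with the not-a-string bit clear, the ASCII encoding bit set and a sequential
// (00) representation survives both branches above.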
4614
4615
4616void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
4617 Register scratch,
4618 Label* failure) {
4619 int kFlatAsciiStringMask =
4620 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
4621 int kFlatAsciiStringTag = ASCII_STRING_TYPE;
4622 And(scratch, type, Operand(kFlatAsciiStringMask));
4623 Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
4624}
4625
4626
4627static const int kRegisterPassedArguments = 4;
4628
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004629int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
4630 int num_double_arguments) {
4631 int stack_passed_words = 0;
4632 num_reg_arguments += 2 * num_double_arguments;
4633
4634 // Up to four simple arguments are passed in registers a0..a3.
4635 if (num_reg_arguments > kRegisterPassedArguments) {
4636 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
4637 }
4638 stack_passed_words += kCArgSlotCount;
4639 return stack_passed_words;
4640}
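// Illustrative example, assuming the O32 ABI's four reserved argument slots
// (kCArgSlotCount == 4): a call with 3 integer and 2 double arguments counts
// as 3 + 2 * 2 == 7 register-sized arguments, of which 7 - 4 == 3 spill to the
// stack; adding the 4 argument slots gives a return value of 7 words.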
4641
4642
4643void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
4644 int num_double_arguments,
4645 Register scratch) {
Steve Block44f0eee2011-05-26 01:26:41 +01004646 int frame_alignment = ActivationFrameAlignment();
4647
Steve Block44f0eee2011-05-26 01:26:41 +01004648 // Up to four simple arguments are passed in registers a0..a3.
4649 // Those four arguments must have reserved argument slots on the stack for
4650 // mips, even though those argument slots are not normally used.
4651 // Remaining arguments are pushed on the stack, above (higher address than)
4652 // the argument slots.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004653 int stack_passed_arguments = CalculateStackPassedWords(
4654 num_reg_arguments, num_double_arguments);
Steve Block44f0eee2011-05-26 01:26:41 +01004655 if (frame_alignment > kPointerSize) {
4656 // Make the stack end at the required alignment and make room for the
4657 // stack-passed words plus the original value of sp.
4658 mov(scratch, sp);
4659 Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
4660 ASSERT(IsPowerOf2(frame_alignment));
4661 And(sp, sp, Operand(-frame_alignment));
4662 sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
4663 } else {
4664 Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
4665 }
4666}
4667
4668
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004669void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
4670 Register scratch) {
4671 PrepareCallCFunction(num_reg_arguments, 0, scratch);
4672}
4673
4674
Steve Block44f0eee2011-05-26 01:26:41 +01004675void MacroAssembler::CallCFunction(ExternalReference function,
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004676 int num_reg_arguments,
4677 int num_double_arguments) {
4678 li(t8, Operand(function));
4679 CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
Steve Block44f0eee2011-05-26 01:26:41 +01004680}
4681
4682
4683void MacroAssembler::CallCFunction(Register function,
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004684 int num_reg_arguments,
4685 int num_double_arguments) {
4686 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
4687}
4688
4689
4690void MacroAssembler::CallCFunction(ExternalReference function,
Steve Block44f0eee2011-05-26 01:26:41 +01004691 int num_arguments) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004692 CallCFunction(function, num_arguments, 0);
4693}
4694
4695
4696void MacroAssembler::CallCFunction(Register function,
4697 int num_arguments) {
4698 CallCFunction(function, num_arguments, 0);
Steve Block44f0eee2011-05-26 01:26:41 +01004699}
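// Illustrative calling sequence (the argument registers and the scratch
// register below are examples, not requirements of this file):
//   PrepareCallCFunction(2, 0, t0);   // Reserve slots and align the stack.
//   li(a0, Operand(1234));            // First integer argument.
//   li(a1, Operand(5678));            // Second integer argument.
//   CallCFunction(ref, 2, 0);         // 'ref' is some ExternalReference.
// CallCFunctionHelper below then pops the space that PrepareCallCFunction
// reserved.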
4700
4701
4702void MacroAssembler::CallCFunctionHelper(Register function,
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004703 int num_reg_arguments,
4704 int num_double_arguments) {
4705 ASSERT(has_frame());
Steve Block44f0eee2011-05-26 01:26:41 +01004706 // Make sure that the stack is aligned before calling a C function unless
4707 // running in the simulator. The simulator has its own alignment check which
4708 // provides more information.
4709 // The argument slots are presumed to have been set up by
4710 // PrepareCallCFunction. The C function must be called via t9, per the MIPS ABI.
4711
4712#if defined(V8_HOST_ARCH_MIPS)
4713 if (emit_debug_code()) {
4714 int frame_alignment = OS::ActivationFrameAlignment();
4715 int frame_alignment_mask = frame_alignment - 1;
4716 if (frame_alignment > kPointerSize) {
4717 ASSERT(IsPowerOf2(frame_alignment));
4718 Label alignment_as_expected;
4719 And(at, sp, Operand(frame_alignment_mask));
4720 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
4721 // Don't use Check here, as it will call Runtime_Abort possibly
4722 // re-entering here.
4723 stop("Unexpected alignment in CallCFunction");
4724 bind(&alignment_as_expected);
4725 }
4726 }
4727#endif // V8_HOST_ARCH_MIPS
4728
4729 // Just call directly. The function called cannot cause a GC, or
4730 // allow preemption, so the return address in the link register
4731 // stays correct.
Steve Block44f0eee2011-05-26 01:26:41 +01004732
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004733 if (!function.is(t9)) {
Ben Murdoch257744e2011-11-30 15:57:28 +00004734 mov(t9, function);
Steve Block44f0eee2011-05-26 01:26:41 +01004735 function = t9;
4736 }
4737
4738 Call(function);
4739
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004740 int stack_passed_arguments = CalculateStackPassedWords(
4741 num_reg_arguments, num_double_arguments);
Steve Block44f0eee2011-05-26 01:26:41 +01004742
4743 if (OS::ActivationFrameAlignment() > kPointerSize) {
4744 lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
4745 } else {
4746 Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
4747 }
4748}
4749
4750
4751#undef BRANCH_ARGS_CHECK
4752
4753
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004754void MacroAssembler::PatchRelocatedValue(Register li_location,
4755 Register scratch,
4756 Register new_value) {
4757 lw(scratch, MemOperand(li_location));
4758 // At this point scratch is a lui(at, ...) instruction.
4759 if (emit_debug_code()) {
4760 And(scratch, scratch, kOpcodeMask);
4761 Check(eq, "The instruction to patch should be a lui.",
4762 scratch, Operand(LUI));
4763 lw(scratch, MemOperand(li_location));
4764 }
4765 srl(t9, new_value, kImm16Bits);
4766 Ins(scratch, t9, 0, kImm16Bits);
4767 sw(scratch, MemOperand(li_location));
4768
4769 lw(scratch, MemOperand(li_location, kInstrSize));
4770 // scratch is now ori(at, ...).
4771 if (emit_debug_code()) {
4772 And(scratch, scratch, kOpcodeMask);
4773 Check(eq, "The instruction to patch should be an ori.",
4774 scratch, Operand(ORI));
4775 lw(scratch, MemOperand(li_location, kInstrSize));
4776 }
4777 Ins(scratch, new_value, 0, kImm16Bits);
4778 sw(scratch, MemOperand(li_location, kInstrSize));
4779
4780 // Update the I-cache so the new lui and ori can be executed.
4781 FlushICache(li_location, 2);
4782}
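// Illustrative example (the constant is arbitrary): a value materialized with
// li(at, 0x12345678) is encoded as the pair "lui at, 0x1234; ori at, at,
// 0x5678".  Patching in 0xCAFEBABE rewrites the lui immediate to 0xCAFE and
// the ori immediate to 0xBABE, then flushes both instructions from the
// I-cache.  GetRelocatedValue below performs the inverse, reassembling
// (lui_imm << 16) | ori_imm.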
4783
4784void MacroAssembler::GetRelocatedValue(Register li_location,
4785 Register value,
4786 Register scratch) {
4787 lw(value, MemOperand(li_location));
4788 if (emit_debug_code()) {
4789 And(value, value, kOpcodeMask);
4790 Check(eq, "The instruction should be a lui.",
4791 value, Operand(LUI));
4792 lw(value, MemOperand(li_location));
4793 }
4794
4795 // value now holds a lui instruction. Extract the immediate.
4796 sll(value, value, kImm16Bits);
4797
4798 lw(scratch, MemOperand(li_location, kInstrSize));
4799 if (emit_debug_code()) {
4800 And(scratch, scratch, kOpcodeMask);
4801 Check(eq, "The instruction should be an ori.",
4802 scratch, Operand(ORI));
4803 lw(scratch, MemOperand(li_location, kInstrSize));
4804 }
4805 // "scratch" now holds an ori instruction. Extract the immediate.
4806 andi(scratch, scratch, kImm16Mask);
4807
4808 // Merge the results.
4809 or_(value, value, scratch);
4810}
4811
4812
4813void MacroAssembler::CheckPageFlag(
4814 Register object,
4815 Register scratch,
4816 int mask,
4817 Condition cc,
4818 Label* condition_met) {
4819 And(scratch, object, Operand(~Page::kPageAlignmentMask));
4820 lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
4821 And(scratch, scratch, Operand(mask));
4822 Branch(condition_met, cc, scratch, Operand(zero_reg));
4823}
4824
4825
4826void MacroAssembler::JumpIfBlack(Register object,
4827 Register scratch0,
4828 Register scratch1,
4829 Label* on_black) {
4830 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
4831 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
4832}
4833
4834
4835void MacroAssembler::HasColor(Register object,
4836 Register bitmap_scratch,
4837 Register mask_scratch,
4838 Label* has_color,
4839 int first_bit,
4840 int second_bit) {
4841 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
4842 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
4843
4844 GetMarkBits(object, bitmap_scratch, mask_scratch);
4845
4846 Label other_color, word_boundary;
4847 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4848 And(t8, t9, Operand(mask_scratch));
4849 Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
4850 // Shift left 1 by adding.
4851 Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
4852 Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
4853 And(t8, t9, Operand(mask_scratch));
4854 Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
4855 jmp(&other_color);
4856
4857 bind(&word_boundary);
4858 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
4859 And(t9, t9, Operand(1));
4860 Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
4861 bind(&other_color);
4862}
4863
4864
4865// Detect some, but not all, common pointer-free objects. This is used by the
4866// incremental write barrier which doesn't care about oddballs (they are always
4867// marked black immediately so this code is not hit).
4868void MacroAssembler::JumpIfDataObject(Register value,
4869 Register scratch,
4870 Label* not_data_object) {
4871 ASSERT(!AreAliased(value, scratch, t8, no_reg));
4872 Label is_data_object;
4873 lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
4874 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
4875 Branch(&is_data_object, eq, t8, Operand(scratch));
4876 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
4877 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
4878 // If it's a string and it's not a cons string then it's an object containing
4879 // no GC pointers.
4880 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4881 And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
4882 Branch(not_data_object, ne, t8, Operand(zero_reg));
4883 bind(&is_data_object);
4884}
4885
4886
4887void MacroAssembler::GetMarkBits(Register addr_reg,
4888 Register bitmap_reg,
4889 Register mask_reg) {
4890 ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
4891 And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
4892 Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
4893 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
4894 Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
4895 sll(t8, t8, kPointerSizeLog2);
4896 Addu(bitmap_reg, bitmap_reg, t8);
4897 li(t8, Operand(1));
4898 sllv(mask_reg, t8, mask_reg);
4899}
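// Illustrative decomposition, assuming 4-byte pointers, 32-bit bitmap cells
// and 1 MB pages (kPointerSizeLog2 == 2, Bitmap::kBitsPerCellLog2 == 5,
// kPageSizeBits == 20): bits 2..6 of the address pick the bit within a bitmap
// cell (mask_reg becomes 1 shifted by that amount), bits 7..19 pick the cell
// within the page's marking bitmap, and the remaining high bits identify the
// page that bitmap_reg points into.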
4900
4901
4902void MacroAssembler::EnsureNotWhite(
4903 Register value,
4904 Register bitmap_scratch,
4905 Register mask_scratch,
4906 Register load_scratch,
4907 Label* value_is_white_and_not_data) {
4908 ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
4909 GetMarkBits(value, bitmap_scratch, mask_scratch);
4910
4911 // If the value is black or grey we don't need to do anything.
4912 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
4913 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
4914 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
4915 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
4916
4917 Label done;
4918
4919 // Since both black and grey have a 1 in the first position and white does
4920 // not have a 1 there we only need to check one bit.
4921 lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4922 And(t8, mask_scratch, load_scratch);
4923 Branch(&done, ne, t8, Operand(zero_reg));
4924
Ben Murdochc7cc0282012-03-05 14:35:55 +00004925 if (emit_debug_code()) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00004926 // Check for impossible bit pattern.
4927 Label ok;
4928 // sll may overflow, making the check conservative.
4929 sll(t8, mask_scratch, 1);
4930 And(t8, load_scratch, t8);
4931 Branch(&ok, eq, t8, Operand(zero_reg));
4932 stop("Impossible marking bit pattern");
4933 bind(&ok);
4934 }
4935
4936 // Value is white. We check whether it is data that doesn't need scanning.
4937 // Currently only checks for HeapNumber and non-cons strings.
4938 Register map = load_scratch; // Holds map while checking type.
4939 Register length = load_scratch; // Holds length of object after testing type.
4940 Label is_data_object;
4941
4942 // Check for heap-number
4943 lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
4944 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
4945 {
4946 Label skip;
4947 Branch(&skip, ne, t8, Operand(map));
4948 li(length, HeapNumber::kSize);
4949 Branch(&is_data_object);
4950 bind(&skip);
4951 }
4952
4953 // Check for strings.
4954 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
4955 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
4956 // If it's a string and it's not a cons string then it's an object containing
4957 // no GC pointers.
4958 Register instance_type = load_scratch;
4959 lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
4960 And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
4961 Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
4962 // It's a non-indirect (non-cons and non-slice) string.
4963 // If it's external, the length is just ExternalString::kSize.
4964 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
4965 // External strings are the only ones with the kExternalStringTag bit
4966 // set.
4967 ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
4968 ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
4969 And(t8, instance_type, Operand(kExternalStringTag));
4970 {
4971 Label skip;
4972 Branch(&skip, eq, t8, Operand(zero_reg));
4973 li(length, ExternalString::kSize);
4974 Branch(&is_data_object);
4975 bind(&skip);
4976 }
4977
4978 // Sequential string, either ASCII or UC16.
4979 // For ASCII (char-size of 1) we shift the smi tag away to get the length.
4980 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
4981 // getting the length multiplied by 2.
4982 ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
4983 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
4984 lw(t9, FieldMemOperand(value, String::kLengthOffset));
4985 And(t8, instance_type, Operand(kStringEncodingMask));
4986 {
4987 Label skip;
4988 Branch(&skip, eq, t8, Operand(zero_reg));
4989 srl(t9, t9, 1);
4990 bind(&skip);
4991 }
4992 Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
4993 And(length, length, Operand(~kObjectAlignmentMask));
4994
4995 bind(&is_data_object);
4996 // Value is a data object, and it is white. Mark it black. Since we know
4997 // that the object is white we can make it black by flipping one bit.
4998 lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4999 Or(t8, t8, Operand(mask_scratch));
5000 sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5001
5002 And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
5003 lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5004 Addu(t8, t8, Operand(length));
5005 sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5006
5007 bind(&done);
5008}
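// Illustrative length computation for the sequential-string case above (the
// exact header size is whatever SeqString::kHeaderSize is in this revision):
// the length field holds the Smi 2 * n; the conditional srl turns that into n
// payload bytes for ASCII strings, while two-byte strings keep 2 * n, and the
// total is rounded up to object alignment before being added to the page's
// live-bytes counter.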
5009
5010
Ben Murdoch257744e2011-11-30 15:57:28 +00005011void MacroAssembler::LoadInstanceDescriptors(Register map,
5012 Register descriptors) {
5013 lw(descriptors,
5014 FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
5015 Label not_smi;
5016 JumpIfNotSmi(descriptors, &not_smi);
5017 li(descriptors, Operand(FACTORY->empty_descriptor_array()));
5018 bind(&not_smi);
5019}
5020
5021
Ben Murdoch592a9fc2012-03-05 11:04:45 +00005022void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
5023 ASSERT(!output_reg.is(input_reg));
5024 Label done;
5025 li(output_reg, Operand(255));
5026 // Normal branch: nop in delay slot.
5027 Branch(&done, gt, input_reg, Operand(output_reg));
5028 // Use delay slot in this branch.
5029 Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
5030 mov(output_reg, zero_reg); // In delay slot.
5031 mov(output_reg, input_reg); // Value is in range 0..255.
5032 bind(&done);
5033}
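// Illustrative results of the clamp above: -5 -> 0, 0 -> 0, 128 -> 128,
// 255 -> 255, 300 -> 255.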
5034
5035
5036void MacroAssembler::ClampDoubleToUint8(Register result_reg,
5037 DoubleRegister input_reg,
5038 DoubleRegister temp_double_reg) {
5039 Label above_zero;
5040 Label done;
5041 Label in_bounds;
5042
5043 Move(temp_double_reg, 0.0);
5044 BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
5045
5046 // Double value is less than or equal to zero, or NaN: return 0.
5047 mov(result_reg, zero_reg);
5048 Branch(&done);
5049
5050 // Double value is greater than 255 (including +Infinity): return 255.
5051 bind(&above_zero);
5052 Move(temp_double_reg, 255.0);
5053 BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
5054 li(result_reg, Operand(255));
5055 Branch(&done);
5056
5057 // In 0-255 range, round and truncate.
5058 bind(&in_bounds);
5059 round_w_d(temp_double_reg, input_reg);
5060 mfc1(result_reg, temp_double_reg);
5061 bind(&done);
5062}
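// Illustrative results of the double clamp above: NaN and values <= 0.0 give
// 0, values above 255.0 (including +Infinity) give 255, and in-range values
// are rounded by round_w_d, e.g. 128.6 -> 129.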
5063
5064
5065bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
5066 if (r1.is(r2)) return true;
5067 if (r1.is(r3)) return true;
5068 if (r1.is(r4)) return true;
5069 if (r2.is(r3)) return true;
5070 if (r2.is(r4)) return true;
5071 if (r3.is(r4)) return true;
5072 return false;
5073}
5074
5075
Steve Block44f0eee2011-05-26 01:26:41 +01005076CodePatcher::CodePatcher(byte* address, int instructions)
5077 : address_(address),
5078 instructions_(instructions),
5079 size_(instructions * Assembler::kInstrSize),
Ben Murdoch257744e2011-11-30 15:57:28 +00005080 masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
Steve Block44f0eee2011-05-26 01:26:41 +01005081 // Create a new macro assembler pointing to the address of the code to patch.
5082 // The size is adjusted with kGap in order for the assembler to generate size
5083 // bytes of instructions without failing with buffer size constraints.
5084 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5085}
5086
5087
5088CodePatcher::~CodePatcher() {
5089 // Indicate that code has changed.
5090 CPU::FlushICache(address_, size_);
5091
5092 // Check that the code was patched as expected.
5093 ASSERT(masm_.pc_ == address_ + size_);
5094 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5095}
5096
5097
Ben Murdoch257744e2011-11-30 15:57:28 +00005098void CodePatcher::Emit(Instr instr) {
5099 masm()->emit(instr);
Steve Block44f0eee2011-05-26 01:26:41 +01005100}
5101
5102
5103void CodePatcher::Emit(Address addr) {
5104 masm()->emit(reinterpret_cast<Instr>(addr));
5105}
5106
5107
Ben Murdoch257744e2011-11-30 15:57:28 +00005108void CodePatcher::ChangeBranchCondition(Condition cond) {
5109 Instr instr = Assembler::instr_at(masm_.pc_);
5110 ASSERT(Assembler::IsBranch(instr));
5111 uint32_t opcode = Assembler::GetOpcodeField(instr);
5112 // Currently only the 'eq' and 'ne' condition values are supported, and only
5113 // for the simple branch instructions (where the opcode encodes the branch type).
5114 // There are some special cases (see Assembler::IsBranch()) so extending this
5115 // would be tricky.
5116 ASSERT(opcode == BEQ ||
5117 opcode == BNE ||
5118 opcode == BLEZ ||
5119 opcode == BGTZ ||
5120 opcode == BEQL ||
5121 opcode == BNEL ||
5122 opcode == BLEZL ||
5123 opcode == BGTZL);
5124 opcode = (cond == eq) ? BEQ : BNE;
5125 instr = (instr & ~kOpcodeMask) | opcode;
5126 masm_.emit(instr);
5127}
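// Illustrative use of CodePatcher (the address below is a placeholder):
//   CodePatcher patcher(branch_address, 1);  // Patch a single instruction.
//   patcher.ChangeBranchCondition(ne);       // Turn a beq into a bne.
// The destructor then flushes the I-cache for the patched range and, in debug
// builds, checks that exactly one instruction was emitted.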
Steve Block44f0eee2011-05-26 01:26:41 +01005128
5129
Andrei Popescu31002712010-02-23 13:46:05 +00005130} } // namespace v8::internal
5131
Leon Clarkef7060e22010-06-03 12:02:55 +01005132#endif // V8_TARGET_ARCH_MIPS