Upgrade to 3.29
Update V8 to 3.29.88.17 and update makefiles to support building on
all the relevant platforms.
Bug: 17370214
Change-Id: Ia3407c157fd8d72a93e23d8318ccaf6ecf77fa4e
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index e93a417..604293b 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -1,40 +1,21 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include <limits.h> // For LONG_MIN, LONG_MAX.
-#include "v8.h"
+#include "src/v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#if V8_TARGET_ARCH_MIPS
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "debug.h"
-#include "runtime.h"
+#include "src/base/bits.h"
+#include "src/base/division-by-constant.h"
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/cpu-profiler.h"
+#include "src/debug.h"
+#include "src/isolate-inl.h"
+#include "src/runtime.h"
namespace v8 {
namespace internal {
@@ -42,7 +23,6 @@
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true),
has_frame_(false) {
if (isolate() != NULL) {
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
@@ -51,6 +31,43 @@
}
+void MacroAssembler::Load(Register dst,
+ const MemOperand& src,
+ Representation r) {
+ DCHECK(!r.IsDouble());
+ if (r.IsInteger8()) {
+ lb(dst, src);
+ } else if (r.IsUInteger8()) {
+ lbu(dst, src);
+ } else if (r.IsInteger16()) {
+ lh(dst, src);
+ } else if (r.IsUInteger16()) {
+ lhu(dst, src);
+ } else {
+ lw(dst, src);
+ }
+}
+
+
+void MacroAssembler::Store(Register src,
+ const MemOperand& dst,
+ Representation r) {
+ DCHECK(!r.IsDouble());
+ if (r.IsInteger8() || r.IsUInteger8()) {
+ sb(src, dst);
+ } else if (r.IsInteger16() || r.IsUInteger16()) {
+ sh(src, dst);
+ } else {
+ if (r.IsHeapObject()) {
+ AssertNotSmi(src);
+ } else if (r.IsSmi()) {
+ AssertSmi(src);
+ }
+ sw(src, dst);
+ }
+}
+
+
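Note: the new Load/Store helpers dispatch on the Hydrogen representation to pick the correctly sized MIPS load or store. A minimal sketch of the equivalent semantics in plain C++ (illustrative only, not V8 API):

  #include <cstdint>

  // Each representation maps to one sign/zero-extending 32-bit load.
  int32_t load_integer8(const void* p)   { return *static_cast<const int8_t*>(p);   }  // lb
  int32_t load_uinteger8(const void* p)  { return *static_cast<const uint8_t*>(p);  }  // lbu
  int32_t load_integer16(const void* p)  { return *static_cast<const int16_t*>(p);  }  // lh
  int32_t load_uinteger16(const void* p) { return *static_cast<const uint16_t*>(p); }  // lhu
  int32_t load_word(const void* p)       { return *static_cast<const int32_t*>(p);  }  // lw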
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index) {
lw(destination, MemOperand(s6, index << kPointerSizeLog2));
@@ -81,25 +98,12 @@
}
-void MacroAssembler::LoadHeapObject(Register result,
- Handle<HeapObject> object) {
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- li(result, Operand(cell));
- lw(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
- } else {
- li(result, Operand(object));
- }
-}
-
-
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of kNumSafepointRegisters values on the
// stack, so adjust the stack for unsaved registers.
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- ASSERT(num_unsaved >= 0);
+ DCHECK(num_unsaved >= 0);
if (num_unsaved > 0) {
Subu(sp, sp, Operand(num_unsaved * kPointerSize));
}
@@ -116,32 +120,6 @@
}
-void MacroAssembler::PushSafepointRegistersAndDoubles() {
- PushSafepointRegisters();
- Subu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
- for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
- FPURegister reg = FPURegister::FromAllocationIndex(i);
- sdc1(reg, MemOperand(sp, i * kDoubleSize));
- }
-}
-
-
-void MacroAssembler::PopSafepointRegistersAndDoubles() {
- for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
- FPURegister reg = FPURegister::FromAllocationIndex(i);
- ldc1(reg, MemOperand(sp, i * kDoubleSize));
- }
- Addu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
- PopSafepointRegisters();
-}
-
-
-void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
- Register dst) {
- sw(src, SafepointRegistersAndDoublesSlot(dst));
-}
-
-
void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
sw(src, SafepointRegisterSlot(dst));
}
@@ -167,7 +145,7 @@
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
UNIMPLEMENTED_MIPS();
// General purpose registers are pushed last on the stack.
- int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
+ int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
return MemOperand(sp, doubles_size + register_offset);
}
@@ -177,7 +155,7 @@
Register scratch,
Condition cc,
Label* branch) {
- ASSERT(cc == eq || cc == ne);
+ DCHECK(cc == eq || cc == ne);
And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
Branch(branch, cc, scratch,
Operand(ExternalReference::new_space_start(isolate())));
@@ -192,8 +170,9 @@
RAStatus ra_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- ASSERT(!AreAliased(value, dst, t8, object));
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
+ DCHECK(!AreAliased(value, dst, t8, object));
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
@@ -205,7 +184,7 @@
// Although the object register is tagged, the offset is relative to the start
  // of the object, so the offset must be a multiple of kPointerSize.
- ASSERT(IsAligned(offset, kPointerSize));
+ DCHECK(IsAligned(offset, kPointerSize));
Addu(dst, object, Operand(offset - kHeapObjectTag));
if (emit_debug_code()) {
@@ -222,15 +201,91 @@
ra_status,
save_fp,
remembered_set_action,
- OMIT_SMI_CHECK);
+ OMIT_SMI_CHECK,
+ pointers_to_here_check_for_value);
bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- li(value, Operand(BitCast<int32_t>(kZapValue + 4)));
- li(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
+ li(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
+ li(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
+ }
+}
+
+
+// Will clobber 4 registers: object, map, dst, ip. The
+// register 'object' contains a heap object pointer.
+void MacroAssembler::RecordWriteForMap(Register object,
+ Register map,
+ Register dst,
+ RAStatus ra_status,
+ SaveFPRegsMode fp_mode) {
+ if (emit_debug_code()) {
+ DCHECK(!dst.is(at));
+ lw(dst, FieldMemOperand(map, HeapObject::kMapOffset));
+ Check(eq,
+ kWrongAddressOrValuePassedToRecordWrite,
+ dst,
+ Operand(isolate()->factory()->meta_map()));
+ }
+
+ if (!FLAG_incremental_marking) {
+ return;
+ }
+
+ if (emit_debug_code()) {
+ lw(at, FieldMemOperand(object, HeapObject::kMapOffset));
+ Check(eq,
+ kWrongAddressOrValuePassedToRecordWrite,
+ map,
+ Operand(at));
+ }
+
+ Label done;
+
+  // A single check of the map's page's interesting flag suffices, since it is
+ // only set during incremental collection, and then it's also guaranteed that
+ // the from object's page's interesting flag is also set. This optimization
+ // relies on the fact that maps can never be in new space.
+ CheckPageFlag(map,
+ map, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ eq,
+ &done);
+
+ Addu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
+ if (emit_debug_code()) {
+ Label ok;
+ And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
+ Branch(&ok, eq, at, Operand(zero_reg));
+ stop("Unaligned cell in write barrier");
+ bind(&ok);
+ }
+
+ // Record the actual write.
+ if (ra_status == kRAHasNotBeenSaved) {
+ push(ra);
+ }
+ RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
+ fp_mode);
+ CallStub(&stub);
+ if (ra_status == kRAHasNotBeenSaved) {
+ pop(ra);
+ }
+
+ bind(&done);
+
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst);
+
+ // Clobber clobbered registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ li(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
+ li(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
}
}
@@ -238,38 +293,45 @@
// Will clobber 4 registers: object, address, scratch, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object,
- Register address,
- Register value,
- RAStatus ra_status,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- ASSERT(!AreAliased(object, address, value, t8));
- ASSERT(!AreAliased(object, address, value, t9));
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are cp.
- ASSERT(!address.is(cp) && !value.is(cp));
+void MacroAssembler::RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ RAStatus ra_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
+ DCHECK(!AreAliased(object, address, value, t8));
+ DCHECK(!AreAliased(object, address, value, t9));
if (emit_debug_code()) {
lw(at, MemOperand(address));
Assert(
- eq, "Wrong address or value passed to RecordWrite", at, Operand(value));
+ eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
}
+ if (remembered_set_action == OMIT_REMEMBERED_SET &&
+ !FLAG_incremental_marking) {
+ return;
+ }
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
Label done;
if (smi_check == INLINE_SMI_CHECK) {
- ASSERT_EQ(0, kSmiTag);
+ DCHECK_EQ(0, kSmiTag);
JumpIfSmi(value, &done);
}
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- eq,
- &done);
+ if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ eq,
+ &done);
+ }
CheckPageFlag(object,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask,
@@ -280,7 +342,8 @@
if (ra_status == kRAHasNotBeenSaved) {
push(ra);
}
- RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+ RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
+ fp_mode);
CallStub(&stub);
if (ra_status == kRAHasNotBeenSaved) {
pop(ra);
@@ -288,11 +351,16 @@
bind(&done);
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at,
+ value);
+
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- li(address, Operand(BitCast<int32_t>(kZapValue + 12)));
- li(value, Operand(BitCast<int32_t>(kZapValue + 16)));
+ li(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
+ li(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
}
}
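Note: RecordWrite now bails out early in two more places: when the caller asked to omit the remembered set while incremental marking is off, and when the value's page is known not to be interesting. A hedged sketch of the filter the generated code implements (helper and parameter names are illustrative):

  // Returns true only when the RecordWriteStub call is actually needed.
  bool needs_write_barrier(bool value_is_smi,
                           bool value_page_interesting,    // kPointersToHereAreInterestingMask
                           bool object_page_interesting) { // kPointersFromHereAreInterestingMask
    if (value_is_smi) return false;             // smis are not heap pointers
    if (!value_page_interesting) return false;  // target page needs no tracking
    if (!object_page_interesting) return false;
    return true;                                // fall through to the stub
  }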
@@ -325,12 +393,11 @@
if (and_then == kFallThroughAtEnd) {
Branch(&done, eq, t8, Operand(zero_reg));
} else {
- ASSERT(and_then == kReturnAtEnd);
+ DCHECK(and_then == kReturnAtEnd);
Ret(eq, t8, Operand(zero_reg));
}
push(ra);
- StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(fp_mode);
+ StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
CallStub(&store_buffer_overflow);
pop(ra);
bind(&done);
@@ -349,56 +416,55 @@
Label* miss) {
Label same_contexts;
- ASSERT(!holder_reg.is(scratch));
- ASSERT(!holder_reg.is(at));
- ASSERT(!scratch.is(at));
+ DCHECK(!holder_reg.is(scratch));
+ DCHECK(!holder_reg.is(at));
+ DCHECK(!scratch.is(at));
// Load current lexical context from the stack frame.
lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
// In debug mode, make sure the lexical context is set.
#ifdef DEBUG
- Check(ne, "we should not have an empty lexical context",
+ Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
scratch, Operand(zero_reg));
#endif
- // Load the global context of the current context.
- int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ // Load the native context of the current context.
+ int offset =
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
lw(scratch, FieldMemOperand(scratch, offset));
- lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
+ lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
- // Check the context is a global context.
+ // Check the context is a native context.
if (emit_debug_code()) {
- // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
push(holder_reg); // Temporarily save holder on the stack.
- // Read the first word and compare to the global_context_map.
+ // Read the first word and compare to the native_context_map.
lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
- LoadRoot(at, Heap::kGlobalContextMapRootIndex);
- Check(eq, "JSGlobalObject::global_context should be a global context.",
+ LoadRoot(at, Heap::kNativeContextMapRootIndex);
+ Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
holder_reg, Operand(at));
pop(holder_reg); // Restore holder.
}
// Check if both contexts are the same.
- lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
Branch(&same_contexts, eq, scratch, Operand(at));
- // Check the context is a global context.
+ // Check the context is a native context.
if (emit_debug_code()) {
- // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
push(holder_reg); // Temporarily save holder on the stack.
mov(holder_reg, at); // Move at to its holding place.
LoadRoot(at, Heap::kNullValueRootIndex);
- Check(ne, "JSGlobalProxy::context() should not be null.",
+ Check(ne, kJSGlobalProxyContextShouldNotBeNull,
holder_reg, Operand(at));
lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
- LoadRoot(at, Heap::kGlobalContextMapRootIndex);
- Check(eq, "JSGlobalObject::global_context should be a global context.",
+ LoadRoot(at, Heap::kNativeContextMapRootIndex);
+ Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
holder_reg, Operand(at));
// Restore at is not needed. at is reloaded below.
pop(holder_reg); // Restore holder.
// Restore at to holder's context.
- lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
}
// Check that the security token in the calling global object is
@@ -415,6 +481,9 @@
}
+// Compute the hash code from the untagged key. This must be kept in sync with
+// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
+// code-stubs-hydrogen.cc.
void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
// First of all we assign the hash seed to scratch.
LoadRoot(scratch, Heap::kHashSeedRootIndex);
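Note: for reference, the C++ hash this assembly mirrors, reproduced from memory of utils.h in this era (verify against the tree before relying on it):

  #include <cstdint>

  uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
    uint32_t hash = key;
    hash = hash ^ seed;
    hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1;
    hash = hash ^ (hash >> 12);
    hash = hash + (hash << 2);
    hash = hash ^ (hash >> 4);
    hash = hash * 2057;           // hash = hash + (hash << 3) + (hash << 11);
    hash = hash ^ (hash >> 16);
    return hash & 0x3fffffff;     // clamp into positive Smi range
  }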
@@ -494,8 +563,7 @@
Subu(reg1, reg1, Operand(1));
// Generate an unrolled loop that performs a few probes before giving up.
- static const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
+ for (int i = 0; i < kNumberDictionaryProbes; i++) {
// Use reg2 for index calculations and keep the hash intact in reg0.
mov(reg2, reg0);
// Compute the masked index: (hash + i + i * i) & mask.
@@ -505,7 +573,7 @@
and_(reg2, reg2, reg1);
// Scale the index by multiplying by the element size.
- ASSERT(SeededNumberDictionary::kEntrySize == 3);
+ DCHECK(SeededNumberDictionary::kEntrySize == 3);
sll(at, reg2, 1); // 2x.
addu(reg2, reg2, at); // reg2 = reg2 * 3.
@@ -514,7 +582,7 @@
addu(reg2, elements, at);
lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
- if (i != kProbes - 1) {
+ if (i != kNumberDictionaryProbes - 1) {
Branch(&done, eq, key, Operand(at));
} else {
Branch(miss, ne, key, Operand(at));
@@ -548,7 +616,7 @@
addiu(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
- ASSERT(!rs.is(at));
+ DCHECK(!rs.is(at));
li(at, rt);
addu(rd, rs, at);
}
@@ -564,7 +632,7 @@
addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm).
} else {
// li handles the relocation.
- ASSERT(!rs.is(at));
+ DCHECK(!rs.is(at));
li(at, rt);
subu(rd, rs, at);
}
@@ -574,7 +642,7 @@
void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson)) {
mult(rs, rt.rm());
mflo(rd);
} else {
@@ -582,9 +650,9 @@
}
} else {
// li handles the relocation.
- ASSERT(!rs.is(at));
+ DCHECK(!rs.is(at));
li(at, rt);
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson)) {
mult(rs, at);
mflo(rd);
} else {
@@ -594,12 +662,77 @@
}
+void MacroAssembler::Mul(Register rd_hi, Register rd_lo,
+ Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ mult(rs, rt.rm());
+ mflo(rd_lo);
+ mfhi(rd_hi);
+ } else {
+ if (rd_lo.is(rs)) {
+ DCHECK(!rd_hi.is(rs));
+ DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
+ muh(rd_hi, rs, rt.rm());
+ mul(rd_lo, rs, rt.rm());
+ } else {
+ DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
+ mul(rd_lo, rs, rt.rm());
+ muh(rd_hi, rs, rt.rm());
+ }
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ mult(rs, at);
+ mflo(rd_lo);
+ mfhi(rd_hi);
+ } else {
+ if (rd_lo.is(rs)) {
+ DCHECK(!rd_hi.is(rs));
+ DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
+ muh(rd_hi, rs, at);
+ mul(rd_lo, rs, at);
+ } else {
+ DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
+ mul(rd_lo, rs, at);
+ muh(rd_hi, rs, at);
+ }
+ }
+ }
+}
+
+
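Note: r6 retires the HI/LO pair, so the 64-bit product comes from two independent instructions, mul (low word) and muh (high word); the emission order flips when rd_lo aliases rs so the input is read before being overwritten. What the pair computes, as plain C++ (narrowing casts are the usual two's-complement truncation):

  #include <cstdint>

  // 32x32 -> 64-bit signed multiply, split into the two halves that
  // mflo/mfhi (pre-r6) or mul/muh (r6) return.
  void mul64(int32_t rs, int32_t rt, int32_t* hi, int32_t* lo) {
    int64_t product = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
    *lo = static_cast<int32_t>(product);        // mflo / mul
    *hi = static_cast<int32_t>(product >> 32);  // mfhi / muh
  }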
+void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ mult(rs, rt.rm());
+ mfhi(rd);
+ } else {
+ muh(rd, rs, rt.rm());
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ mult(rs, at);
+ mfhi(rd);
+ } else {
+ muh(rd, rs, at);
+ }
+ }
+}
+
+
void MacroAssembler::Mult(Register rs, const Operand& rt) {
if (rt.is_reg()) {
mult(rs, rt.rm());
} else {
// li handles the relocation.
- ASSERT(!rs.is(at));
+ DCHECK(!rs.is(at));
li(at, rt);
mult(rs, at);
}
@@ -611,7 +744,7 @@
multu(rs, rt.rm());
} else {
// li handles the relocation.
- ASSERT(!rs.is(at));
+ DCHECK(!rs.is(at));
li(at, rt);
multu(rs, at);
}
@@ -623,19 +756,68 @@
div(rs, rt.rm());
} else {
// li handles the relocation.
- ASSERT(!rs.is(at));
+ DCHECK(!rs.is(at));
li(at, rt);
div(rs, at);
}
}
+void MacroAssembler::Div(Register rem, Register res,
+ Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ div(rs, rt.rm());
+ mflo(res);
+ mfhi(rem);
+ } else {
+ div(res, rs, rt.rm());
+ mod(rem, rs, rt.rm());
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ div(rs, at);
+ mflo(res);
+ mfhi(rem);
+ } else {
+ div(res, rs, at);
+ mod(rem, rs, at);
+ }
+ }
+}
+
+
+void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ if (!IsMipsArchVariant(kMips32r6)) {
+ div(rs, rt.rm());
+ mfhi(rd);
+ } else {
+ mod(rd, rs, rt.rm());
+ }
+ } else {
+ // li handles the relocation.
+ DCHECK(!rs.is(at));
+ li(at, rt);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ div(rs, at);
+ mfhi(rd);
+ } else {
+ mod(rd, rs, at);
+ }
+ }
+}
+
+
void MacroAssembler::Divu(Register rs, const Operand& rt) {
if (rt.is_reg()) {
divu(rs, rt.rm());
} else {
// li handles the relocation.
- ASSERT(!rs.is(at));
+ DCHECK(!rs.is(at));
li(at, rt);
divu(rs, at);
}
@@ -650,7 +832,7 @@
andi(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
- ASSERT(!rs.is(at));
+ DCHECK(!rs.is(at));
li(at, rt);
and_(rd, rs, at);
}
@@ -666,7 +848,7 @@
ori(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
- ASSERT(!rs.is(at));
+ DCHECK(!rs.is(at));
li(at, rt);
or_(rd, rs, at);
}
@@ -682,7 +864,7 @@
xori(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
- ASSERT(!rs.is(at));
+ DCHECK(!rs.is(at));
li(at, rt);
xor_(rd, rs, at);
}
@@ -695,7 +877,7 @@
nor(rd, rs, rt.rm());
} else {
// li handles the relocation.
- ASSERT(!rs.is(at));
+ DCHECK(!rs.is(at));
li(at, rt);
nor(rd, rs, at);
}
@@ -703,9 +885,9 @@
void MacroAssembler::Neg(Register rs, const Operand& rt) {
- ASSERT(rt.is_reg());
- ASSERT(!at.is(rs));
- ASSERT(!at.is(rt.rm()));
+ DCHECK(rt.is_reg());
+ DCHECK(!at.is(rs));
+ DCHECK(!at.is(rt.rm()));
li(at, -1);
xor_(rs, rt.rm(), at);
}
@@ -719,7 +901,7 @@
slti(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
- ASSERT(!rs.is(at));
+ DCHECK(!rs.is(at));
li(at, rt);
slt(rd, rs, at);
}
@@ -735,7 +917,7 @@
sltiu(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
- ASSERT(!rs.is(at));
+ DCHECK(!rs.is(at));
li(at, rt);
sltu(rd, rs, at);
}
@@ -744,7 +926,7 @@
void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
if (rt.is_reg()) {
rotrv(rd, rs, rt.rm());
} else {
@@ -768,10 +950,49 @@
}
}
-//------------Pseudo-instructions-------------
+
+void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
+ if (IsMipsArchVariant(kLoongson)) {
+ lw(zero_reg, rs);
+ } else {
+ pref(hint, rs);
+ }
+}
+
+
+// ------------Pseudo-instructions-------------
+
+void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
+ lwr(rd, rs);
+ lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+}
+
+
+void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
+ swr(rd, rs);
+ swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+}
+
+
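Note: Ulw/Usw use the classic lwr/lwl (swr/swl) pair for unaligned word access: on little-endian MIPS, lwr fills the low bytes starting at rs.offset() and lwl fills the high bytes ending at rs.offset() + 3. The net effect, sketched with memcpy:

  #include <cstdint>
  #include <cstring>

  // Equivalent of Ulw on little-endian: a 32-bit load with no alignment
  // requirement, assembled from individual bytes.
  uint32_t unaligned_load(const uint8_t* p) {
    uint32_t value;
    std::memcpy(&value, p, sizeof(value));
    return value;
  }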
+void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
+ AllowDeferredHandleDereference smi_check;
+ if (value->IsSmi()) {
+ li(dst, Operand(value), mode);
+ } else {
+ DCHECK(value->IsHeapObject());
+ if (isolate()->heap()->InNewSpace(*value)) {
+ Handle<Cell> cell = isolate()->factory()->NewCell(value);
+ li(dst, Operand(cell));
+ lw(dst, FieldMemOperand(dst, Cell::kValueOffset));
+ } else {
+ li(dst, Operand(value));
+ }
+ }
+}
+
void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
- ASSERT(!j.is_reg());
+ DCHECK(!j.is_reg());
BlockTrampolinePoolScope block_trampoline_pool(this);
if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
// Normal load of an immediate value which does not need Relocation Info.
@@ -852,7 +1073,6 @@
void MacroAssembler::MultiPushFPU(RegList regs) {
- CpuFeatures::Scope scope(FPU);
int16_t num_to_push = NumberOfBitsSet(regs);
int16_t stack_offset = num_to_push * kDoubleSize;
@@ -867,7 +1087,6 @@
void MacroAssembler::MultiPushReversedFPU(RegList regs) {
- CpuFeatures::Scope scope(FPU);
int16_t num_to_push = NumberOfBitsSet(regs);
int16_t stack_offset = num_to_push * kDoubleSize;
@@ -882,7 +1101,6 @@
void MacroAssembler::MultiPopFPU(RegList regs) {
- CpuFeatures::Scope scope(FPU);
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
@@ -896,7 +1114,6 @@
void MacroAssembler::MultiPopReversedFPU(RegList regs) {
- CpuFeatures::Scope scope(FPU);
int16_t stack_offset = 0;
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
@@ -928,10 +1145,10 @@
Register rs,
uint16_t pos,
uint16_t size) {
- ASSERT(pos < 32);
- ASSERT(pos + size < 33);
+ DCHECK(pos < 32);
+ DCHECK(pos + size < 33);
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
ext_(rt, rs, pos, size);
} else {
// Move rs to rt and shift it left then right to get the
@@ -951,14 +1168,14 @@
Register rs,
uint16_t pos,
uint16_t size) {
- ASSERT(pos < 32);
- ASSERT(pos + size <= 32);
- ASSERT(size != 0);
+ DCHECK(pos < 32);
+ DCHECK(pos + size <= 32);
+ DCHECK(size != 0);
- if (kArchVariant == kMips32r2) {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
ins_(rt, rs, pos, size);
} else {
- ASSERT(!rt.is(t8) && !rs.is(t8));
+ DCHECK(!rt.is(t8) && !rs.is(t8));
Subu(at, zero_reg, Operand(1));
srl(at, at, 32 - size);
and_(t8, rs, at);
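Note: the pre-r2 fallback for Ext/Ins synthesizes the bitfield instructions from shifts and masks. In C++ terms, under the preconditions the DCHECKs above enforce (size >= 1, pos + size <= 32):

  #include <cstdint>

  uint32_t extract_bits(uint32_t rs, int pos, int size) {     // ext
    uint32_t shifted = rs << (32 - (pos + size));  // drop bits above the field
    return shifted >> (32 - size);                 // drop bits below; zero-extend
  }

  uint32_t insert_bits(uint32_t rt, uint32_t rs, int pos, int size) {  // ins
    uint32_t mask = (size == 32) ? 0xffffffffu : ((1u << size) - 1u);
    return (rt & ~(mask << pos)) | ((rs & mask) << pos);
  }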
@@ -987,9 +1204,9 @@
// We do this by converting rs minus the MSB to avoid sign conversion,
// then adding 2^31 to the result (if needed).
- ASSERT(!fd.is(scratch));
- ASSERT(!rs.is(t9));
- ASSERT(!rs.is(at));
+ DCHECK(!fd.is(scratch));
+ DCHECK(!rs.is(t9));
+ DCHECK(!rs.is(at));
// Save rs's MSB to t9.
Ext(t9, rs, 31, 1);
@@ -1009,8 +1226,8 @@
// Load 2^31 into f20 as its float representation.
li(at, 0x41E00000);
- mtc1(at, FPURegister::from_code(scratch.code() + 1));
mtc1(zero_reg, scratch);
+ Mthc1(at, scratch);
// Add it to fd.
add_d(fd, fd, scratch);
@@ -1025,21 +1242,23 @@
mtc1(t8, fd);
}
+
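Note: Cvt_d_uw converts an unsigned word by treating the low 31 bits as a non-negative signed value and adding 2^31 back when the saved MSB was set (2^31 has high word 0x41E00000 as a double, matching the li above). Plain C++ equivalent:

  #include <cstdint>

  double uint32_to_double(uint32_t rs) {
    double d = static_cast<double>(static_cast<int32_t>(rs & 0x7fffffffu));
    if (rs >> 31) d += 2147483648.0;  // add back 2^31 for the MSB
    return d;
  }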
void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
- if (kArchVariant == kLoongson && fd.is(fs)) {
- mfc1(t8, FPURegister::from_code(fs.code() + 1));
+ if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
+ Mfhc1(t8, fs);
trunc_w_d(fd, fs);
- mtc1(t8, FPURegister::from_code(fs.code() + 1));
+ Mthc1(t8, fs);
} else {
trunc_w_d(fd, fs);
}
}
+
void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
- if (kArchVariant == kLoongson && fd.is(fs)) {
- mfc1(t8, FPURegister::from_code(fs.code() + 1));
+ if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
+ Mfhc1(t8, fs);
round_w_d(fd, fs);
- mtc1(t8, FPURegister::from_code(fs.code() + 1));
+ Mthc1(t8, fs);
} else {
round_w_d(fd, fs);
}
@@ -1047,10 +1266,10 @@
void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
- if (kArchVariant == kLoongson && fd.is(fs)) {
- mfc1(t8, FPURegister::from_code(fs.code() + 1));
+ if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
+ Mfhc1(t8, fs);
floor_w_d(fd, fs);
- mtc1(t8, FPURegister::from_code(fs.code() + 1));
+ Mthc1(t8, fs);
} else {
floor_w_d(fd, fs);
}
@@ -1058,10 +1277,10 @@
void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
- if (kArchVariant == kLoongson && fd.is(fs)) {
- mfc1(t8, FPURegister::from_code(fs.code() + 1));
+ if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
+ Mfhc1(t8, fs);
ceil_w_d(fd, fs);
- mtc1(t8, FPURegister::from_code(fs.code() + 1));
+ Mthc1(t8, fs);
} else {
ceil_w_d(fd, fs);
}
@@ -1071,13 +1290,13 @@
void MacroAssembler::Trunc_uw_d(FPURegister fd,
Register rs,
FPURegister scratch) {
- ASSERT(!fd.is(scratch));
- ASSERT(!rs.is(at));
+ DCHECK(!fd.is(scratch));
+ DCHECK(!rs.is(at));
// Load 2^31 into scratch as its float representation.
li(at, 0x41E00000);
- mtc1(at, FPURegister::from_code(scratch.code() + 1));
mtc1(zero_reg, scratch);
+ Mthc1(at, scratch);
// Test if scratch > fd.
// If fd < 2^31 we can convert it normally.
Label simple_convert;
@@ -1101,60 +1320,137 @@
}
+void MacroAssembler::Mthc1(Register rt, FPURegister fs) {
+ if (IsFp64Mode()) {
+ mthc1(rt, fs);
+ } else {
+ mtc1(rt, fs.high());
+ }
+}
+
+
+void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
+ if (IsFp64Mode()) {
+ mfhc1(rt, fs);
+ } else {
+ mfc1(rt, fs.high());
+ }
+}
+
+
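Note: Mthc1/Mfhc1 abstract over the FPU register model: in fp64 (FR = 1) mode the high 32 bits of a double live in the same 64-bit register and need mthc1/mfhc1, while in fp32 mode they live in the odd register of the even/odd pair (fs.high()). Which half is being moved, in C++ terms:

  #include <cstdint>
  #include <cstring>

  // The 32-bit word Mfhc1 reads: sign, exponent and top mantissa bits.
  uint32_t high_word(double d) {
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof(bits));
    return static_cast<uint32_t>(bits >> 32);
  }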
void MacroAssembler::BranchF(Label* target,
Label* nan,
Condition cc,
FPURegister cmp1,
FPURegister cmp2,
BranchDelaySlot bd) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
if (cc == al) {
Branch(bd, target);
return;
}
- ASSERT(nan || target);
+ DCHECK(nan || target);
// Check for unordered (NaN) cases.
if (nan) {
- c(UN, D, cmp1, cmp2);
- bc1t(nan);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ c(UN, D, cmp1, cmp2);
+ bc1t(nan);
+ } else {
+ // Use kDoubleCompareReg for comparison result. It has to be unavailable
+      // to the lithium register allocator.
+ DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
+ cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(nan, kDoubleCompareReg);
+ }
}
- if (target) {
- // Here NaN cases were either handled by this function or are assumed to
- // have been handled by the caller.
- // Unsigned conditions are treated as their signed counterpart.
- switch (cc) {
- case Uless:
- case less:
- c(OLT, D, cmp1, cmp2);
- bc1t(target);
- break;
- case Ugreater:
- case greater:
- c(ULE, D, cmp1, cmp2);
- bc1f(target);
- break;
- case Ugreater_equal:
- case greater_equal:
- c(ULT, D, cmp1, cmp2);
- bc1f(target);
- break;
- case Uless_equal:
- case less_equal:
- c(OLE, D, cmp1, cmp2);
- bc1t(target);
- break;
- case eq:
- c(EQ, D, cmp1, cmp2);
- bc1t(target);
- break;
- case ne:
- c(EQ, D, cmp1, cmp2);
- bc1f(target);
- break;
- default:
- CHECK(0);
- };
+ if (!IsMipsArchVariant(kMips32r6)) {
+ if (target) {
+ // Here NaN cases were either handled by this function or are assumed to
+ // have been handled by the caller.
+ switch (cc) {
+ case lt:
+ c(OLT, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case gt:
+ c(ULE, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ case ge:
+ c(ULT, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ case le:
+ c(OLE, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case eq:
+ c(EQ, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case ueq:
+ c(UEQ, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case ne:
+ c(EQ, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ case nue:
+ c(UEQ, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ default:
+ CHECK(0);
+ }
+ }
+ } else {
+ if (target) {
+ // Here NaN cases were either handled by this function or are assumed to
+ // have been handled by the caller.
+ // Unsigned conditions are treated as their signed counterpart.
+      // Use kDoubleCompareReg for comparison result; it is
+ // valid in fp64 (FR = 1) mode which is implied for mips32r6.
+ DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
+ switch (cc) {
+ case lt:
+ cmp(OLT, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(target, kDoubleCompareReg);
+ break;
+ case gt:
+ cmp(ULE, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1eqz(target, kDoubleCompareReg);
+ break;
+ case ge:
+ cmp(ULT, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1eqz(target, kDoubleCompareReg);
+ break;
+ case le:
+ cmp(OLE, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(target, kDoubleCompareReg);
+ break;
+ case eq:
+ cmp(EQ, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(target, kDoubleCompareReg);
+ break;
+ case ueq:
+ cmp(UEQ, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1nez(target, kDoubleCompareReg);
+ break;
+ case ne:
+ cmp(EQ, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1eqz(target, kDoubleCompareReg);
+ break;
+ case nue:
+ cmp(UEQ, L, kDoubleCompareReg, cmp1, cmp2);
+ bc1eqz(target, kDoubleCompareReg);
+ break;
+ default:
+ CHECK(0);
+ }
+ }
}
if (bd == PROTECT) {
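Note: r6 drops the FCSR condition bit (c.cond.fmt with bc1t/bc1f); cmp.cond.fmt instead writes an all-ones/all-zeros mask into an FPU register, tested with bc1nez/bc1eqz, which is why kDoubleCompareReg has to stay out of the register allocator's hands. The compare conditions used above, modeled in C++ (note how gt is built as not-ULE, so a NaN falls through rather than branching):

  #include <cmath>

  // MIPS FP compare semantics: O* conditions are false on NaN,
  // U* conditions are true on NaN.
  bool OLT(double a, double b) { return !std::isnan(a) && !std::isnan(b) && a < b;  }
  bool OLE(double a, double b) { return !std::isnan(a) && !std::isnan(b) && a <= b; }
  bool ULT(double a, double b) { return std::isnan(a) || std::isnan(b) || a < b;    }
  bool ULE(double a, double b) { return std::isnan(a) || std::isnan(b) || a <= b;   }
  bool UN (double a, double b) { return std::isnan(a) || std::isnan(b);             }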
@@ -1164,15 +1460,14 @@
void MacroAssembler::Move(FPURegister dst, double imm) {
- ASSERT(CpuFeatures::IsEnabled(FPU));
static const DoubleRepresentation minus_zero(-0.0);
static const DoubleRepresentation zero(0.0);
- DoubleRepresentation value(imm);
+ DoubleRepresentation value_rep(imm);
// Handle special values first.
bool force_load = dst.is(kDoubleRegZero);
- if (value.bits == zero.bits && !force_load) {
+ if (value_rep == zero && !force_load) {
mov_d(dst, kDoubleRegZero);
- } else if (value.bits == minus_zero.bits && !force_load) {
+ } else if (value_rep == minus_zero && !force_load) {
neg_d(dst, kDoubleRegZero);
} else {
uint32_t lo, hi;
@@ -1189,16 +1484,16 @@
// register of FPU register pair.
if (hi != 0) {
li(at, Operand(hi));
- mtc1(at, dst.high());
+ Mthc1(at, dst);
} else {
- mtc1(zero_reg, dst.high());
+ Mthc1(zero_reg, dst);
}
}
}
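Note: the ±0.0 special cases exist because the two zeros compare equal yet differ in bit pattern, so DoubleRepresentation (bit) equality is the right test; kDoubleRegZero then supplies +0.0 directly and -0.0 via negation. The distinction, in C++:

  #include <cstdint>
  #include <cstring>

  bool is_minus_zero(double d) {
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof(bits));
    return bits == 0x8000000000000000ull;  // only the sign bit set
  }
  // Note: -0.0 == 0.0 is true, so a value compare cannot tell them apart.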
void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
Label done;
Branch(&done, ne, rt, Operand(zero_reg));
mov(rd, rs);
@@ -1210,7 +1505,7 @@
void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
Label done;
Branch(&done, eq, rt, Operand(zero_reg));
mov(rd, rs);
@@ -1222,11 +1517,11 @@
void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson)) {
    // Tests an FP condition code and then conditionally moves rs to rd.
// We do not currently use any FPU cc bit other than bit 0.
- ASSERT(cc == 0);
- ASSERT(!(rs.is(t8) || rd.is(t8)));
+ DCHECK(cc == 0);
+ DCHECK(!(rs.is(t8) || rd.is(t8)));
Label done;
Register scratch = t8;
    // For testing purposes we need to fetch the content of the FCSR register and
@@ -1248,11 +1543,11 @@
void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
- if (kArchVariant == kLoongson) {
+ if (IsMipsArchVariant(kLoongson)) {
    // Tests an FP condition code and then conditionally moves rs to rd.
// We do not currently use any FPU cc bit other than bit 0.
- ASSERT(cc == 0);
- ASSERT(!(rs.is(t8) || rd.is(t8)));
+ DCHECK(cc == 0);
+ DCHECK(!(rs.is(t8) || rd.is(t8)));
Label done;
Register scratch = t8;
    // For testing purposes we need to fetch the content of the FCSR register and
@@ -1274,8 +1569,8 @@
void MacroAssembler::Clz(Register rd, Register rs) {
- if (kArchVariant == kLoongson) {
- ASSERT(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
+ if (IsMipsArchVariant(kLoongson)) {
+ DCHECK(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
Register mask = t8;
Register scratch = t9;
Label loop, end;
@@ -1295,250 +1590,76 @@
}
-// Tries to get a signed int32 out of a double precision floating point heap
-// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
-// 32bits signed integer range.
-// This method implementation differs from the ARM version for performance
-// reasons.
-void MacroAssembler::ConvertToInt32(Register source,
- Register dest,
- Register scratch,
- Register scratch2,
- FPURegister double_scratch,
- Label *not_int32) {
- Label right_exponent, done;
- // Get exponent word (ENDIAN issues).
- lw(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- And(scratch2, scratch, Operand(HeapNumber::kExponentMask));
- // Load dest with zero. We use this either for the final shift or
- // for the answer.
- mov(dest, zero_reg);
- // Check whether the exponent matches a 32 bit signed int that is not a Smi.
- // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
- // the exponent that we are fastest at and also the highest exponent we can
- // handle here.
- const uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- // If we have a match of the int32-but-not-Smi exponent then skip some logic.
- Branch(&right_exponent, eq, scratch2, Operand(non_smi_exponent));
- // If the exponent is higher than that then go to not_int32 case. This
- // catches numbers that don't fit in a signed int32, infinities and NaNs.
- Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));
-
- // We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
- // it rounds to zero.
- const uint32_t zero_exponent =
- (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
- Subu(scratch2, scratch2, Operand(zero_exponent));
- // Dest already has a Smi zero.
- Branch(&done, lt, scratch2, Operand(zero_reg));
- if (!CpuFeatures::IsSupported(FPU)) {
- // We have a shifted exponent between 0 and 30 in scratch2.
- srl(dest, scratch2, HeapNumber::kExponentShift);
- // We now have the exponent in dest. Subtract from 30 to get
- // how much to shift down.
- li(at, Operand(30));
- subu(dest, at, dest);
- }
- bind(&right_exponent);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- // MIPS FPU instructions implementing double precision to integer
- // conversion using round to zero. Since the FP value was qualified
- // above, the resulting integer should be a legal int32.
- // The original 'Exponent' word is still in scratch.
- lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
- trunc_w_d(double_scratch, double_scratch);
- mfc1(dest, double_scratch);
- } else {
- // On entry, dest has final downshift, scratch has original sign/exp/mant.
- // Save sign bit in top bit of dest.
- And(scratch2, scratch, Operand(0x80000000));
- Or(dest, dest, Operand(scratch2));
- // Put back the implicit 1, just above mantissa field.
- Or(scratch, scratch, Operand(1 << HeapNumber::kExponentShift));
-
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We just orred in the implicit bit so that took care of one and
- // we want to leave the sign bit 0 so we subtract 2 bits from the shift
- // distance. But we want to clear the sign-bit so shift one more bit
- // left, then shift right one bit.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- sll(scratch, scratch, shift_distance + 1);
- srl(scratch, scratch, 1);
-
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- lw(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- // Extract the top 10 bits, and insert those bottom 10 bits of scratch.
- // The width of the field here is the same as the shift amount above.
- const int field_width = shift_distance;
- Ext(scratch2, scratch2, 32-shift_distance, field_width);
- Ins(scratch, scratch2, 0, field_width);
- // Move down according to the exponent.
- srlv(scratch, scratch, dest);
- // Prepare the negative version of our integer.
- subu(scratch2, zero_reg, scratch);
- // Trick to check sign bit (msb) held in dest, count leading zero.
- // 0 indicates negative, save negative version with conditional move.
- Clz(dest, dest);
- Movz(scratch, scratch2, dest);
- mov(dest, scratch);
- }
- bind(&done);
-}
-
-
void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
- FPURegister result,
+ Register result,
DoubleRegister double_input,
- Register scratch1,
+ Register scratch,
+ DoubleRegister double_scratch,
Register except_flag,
CheckForInexactConversion check_inexact) {
- ASSERT(CpuFeatures::IsSupported(FPU));
- CpuFeatures::Scope scope(FPU);
+ DCHECK(!result.is(scratch));
+ DCHECK(!double_input.is(double_scratch));
+ DCHECK(!except_flag.is(scratch));
+
+ Label done;
+
+  // Clear the except flag (0 = no exception).
+ mov(except_flag, zero_reg);
+
+ // Test for values that can be exactly represented as a signed 32-bit integer.
+ cvt_w_d(double_scratch, double_input);
+ mfc1(result, double_scratch);
+ cvt_d_w(double_scratch, double_scratch);
+ BranchF(&done, NULL, eq, double_input, double_scratch);
int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
if (check_inexact == kDontCheckForInexactConversion) {
- // Ingore inexact exceptions.
+ // Ignore inexact exceptions.
except_mask &= ~kFCSRInexactFlagMask;
}
// Save FCSR.
- cfc1(scratch1, FCSR);
+ cfc1(scratch, FCSR);
// Disable FPU exceptions.
ctc1(zero_reg, FCSR);
// Do operation based on rounding mode.
switch (rounding_mode) {
case kRoundToNearest:
- Round_w_d(result, double_input);
+ Round_w_d(double_scratch, double_input);
break;
case kRoundToZero:
- Trunc_w_d(result, double_input);
+ Trunc_w_d(double_scratch, double_input);
break;
case kRoundToPlusInf:
- Ceil_w_d(result, double_input);
+ Ceil_w_d(double_scratch, double_input);
break;
case kRoundToMinusInf:
- Floor_w_d(result, double_input);
+ Floor_w_d(double_scratch, double_input);
break;
} // End of switch-statement.
// Retrieve FCSR.
cfc1(except_flag, FCSR);
// Restore FCSR.
- ctc1(scratch1, FCSR);
+ ctc1(scratch, FCSR);
+ // Move the converted value into the result register.
+ mfc1(result, double_scratch);
// Check for fpu exceptions.
And(except_flag, except_flag, Operand(except_mask));
-}
-
-void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
- Register input_high,
- Register input_low,
- Register scratch) {
- Label done, normal_exponent, restore_sign;
- // Extract the biased exponent in result.
- Ext(result,
- input_high,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
-
- // Check for Infinity and NaNs, which should return 0.
- Subu(scratch, result, HeapNumber::kExponentMask);
- Movz(result, zero_reg, scratch);
- Branch(&done, eq, scratch, Operand(zero_reg));
-
- // Express exponent as delta to (number of mantissa bits + 31).
- Subu(result,
- result,
- Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
-
- // If the delta is strictly positive, all bits would be shifted away,
- // which means that we can return 0.
- Branch(&normal_exponent, le, result, Operand(zero_reg));
- mov(result, zero_reg);
- Branch(&done);
-
- bind(&normal_exponent);
- const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
- // Calculate shift.
- Addu(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits));
-
- // Save the sign.
- Register sign = result;
- result = no_reg;
- And(sign, input_high, Operand(HeapNumber::kSignMask));
-
- // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
- // to check for this specific case.
- Label high_shift_needed, high_shift_done;
- Branch(&high_shift_needed, lt, scratch, Operand(32));
- mov(input_high, zero_reg);
- Branch(&high_shift_done);
- bind(&high_shift_needed);
-
- // Set the implicit 1 before the mantissa part in input_high.
- Or(input_high,
- input_high,
- Operand(1 << HeapNumber::kMantissaBitsInTopWord));
- // Shift the mantissa bits to the correct position.
- // We don't need to clear non-mantissa bits as they will be shifted away.
- // If they weren't, it would mean that the answer is in the 32bit range.
- sllv(input_high, input_high, scratch);
-
- bind(&high_shift_done);
-
- // Replace the shifted bits with bits from the lower mantissa word.
- Label pos_shift, shift_done;
- li(at, 32);
- subu(scratch, at, scratch);
- Branch(&pos_shift, ge, scratch, Operand(zero_reg));
-
- // Negate scratch.
- Subu(scratch, zero_reg, scratch);
- sllv(input_low, input_low, scratch);
- Branch(&shift_done);
-
- bind(&pos_shift);
- srlv(input_low, input_low, scratch);
-
- bind(&shift_done);
- Or(input_high, input_high, Operand(input_low));
- // Restore sign if necessary.
- mov(scratch, sign);
- result = sign;
- sign = no_reg;
- Subu(result, zero_reg, input_high);
- Movz(result, input_high, scratch);
bind(&done);
}
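Note: the rewritten EmitFPUTruncate first tries a cheap exactness test, converting to int32 and back and comparing with the input; only inexact inputs take the FCSR-flag path. The test, sketched in C++ (the C++ cast truncates while cvt_w_d follows the current rounding mode, and is UB for out-of-range inputs where the FPU instead raises FCSR flags, but the equality outcome is the same):

  #include <cstdint>

  bool is_exact_int32(double d) {
    int32_t w = static_cast<int32_t>(d);  // cvt_w_d + mfc1
    return static_cast<double>(w) == d;   // cvt_d_w + BranchF eq
  }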
-void MacroAssembler::EmitECMATruncate(Register result,
- FPURegister double_input,
- FPURegister single_scratch,
- Register scratch,
- Register scratch2,
- Register scratch3) {
- CpuFeatures::Scope scope(FPU);
- ASSERT(!scratch2.is(result));
- ASSERT(!scratch3.is(result));
- ASSERT(!scratch3.is(scratch2));
- ASSERT(!scratch.is(result) &&
- !scratch.is(scratch2) &&
- !scratch.is(scratch3));
- ASSERT(!single_scratch.is(double_input));
-
- Label done;
- Label manual;
+void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
+ DoubleRegister double_input,
+ Label* done) {
+ DoubleRegister single_scratch = kLithiumScratchDouble.low();
+ Register scratch = at;
+ Register scratch2 = t9;
// Clear cumulative exception flags and save the FCSR.
cfc1(scratch2, FCSR);
@@ -1554,16 +1675,67 @@
scratch,
kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
// If we had no exceptions we are done.
- Branch(&done, eq, scratch, Operand(zero_reg));
+ Branch(done, eq, scratch, Operand(zero_reg));
+}
- // Load the double value and perform a manual truncation.
- Register input_high = scratch2;
- Register input_low = scratch3;
- Move(input_low, input_high, double_input);
- EmitOutOfInt32RangeTruncate(result,
- input_high,
- input_low,
- scratch);
+
+void MacroAssembler::TruncateDoubleToI(Register result,
+ DoubleRegister double_input) {
+ Label done;
+
+ TryInlineTruncateDoubleToI(result, double_input, &done);
+
+  // If we fell through then the inline version didn't succeed - call stub instead.
+ push(ra);
+ Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
+ sdc1(double_input, MemOperand(sp, 0));
+
+ DoubleToIStub stub(isolate(), sp, result, 0, true, true);
+ CallStub(&stub);
+
+ Addu(sp, sp, Operand(kDoubleSize));
+ pop(ra);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
+ Label done;
+ DoubleRegister double_scratch = f12;
+ DCHECK(!result.is(object));
+
+ ldc1(double_scratch,
+ MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
+ TryInlineTruncateDoubleToI(result, double_scratch, &done);
+
+  // If we fell through then the inline version didn't succeed - call stub instead.
+ push(ra);
+ DoubleToIStub stub(isolate(),
+ object,
+ result,
+ HeapNumber::kValueOffset - kHeapObjectTag,
+ true,
+ true);
+ CallStub(&stub);
+ pop(ra);
+
+ bind(&done);
+}
+
+
+void MacroAssembler::TruncateNumberToI(Register object,
+ Register result,
+ Register heap_number_map,
+ Register scratch,
+ Label* not_number) {
+ Label done;
+ DCHECK(!result.is(object));
+
+ UntagAndJumpIfSmi(result, object, &done);
+ JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
+ TruncateHeapNumberToI(result, object);
+
bind(&done);
}
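Note: TruncateNumberToI, TruncateHeapNumberToI and TruncateDoubleToI form a cascade: untag a Smi if possible, otherwise load the heap number and try the inline FCSR-checked truncation, and only then fall back to DoubleToIStub, which (as I understand it) performs the full ECMA-262 ToInt32. That conversion, as a hedged C++ sketch:

  #include <cmath>
  #include <cstdint>

  // ECMA-262 ToInt32: truncate, reduce modulo 2^32, reinterpret as signed.
  int32_t ecma_to_int32(double d) {
    if (std::isnan(d) || std::isinf(d)) return 0;
    double m = std::fmod(std::trunc(d), 4294967296.0);  // in (-2^32, 2^32)
    if (m < 0) m += 4294967296.0;                       // in [0, 2^32)
    return static_cast<int32_t>(static_cast<uint32_t>(m));
  }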
@@ -1585,7 +1757,7 @@
// Emulated conditional branches do not emit a nop in the branch delay slot.
//
// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
-#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
+#define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK( \
(cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
(cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
@@ -1626,19 +1798,27 @@
if (is_near(L)) {
BranchShort(L, cond, rs, rt, bdslot);
} else {
- Label skip;
- Condition neg_cond = NegateCondition(cond);
- BranchShort(&skip, neg_cond, rs, rt);
- Jr(L, bdslot);
- bind(&skip);
+ if (cond != cc_always) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jr(L, bdslot);
+ bind(&skip);
+ } else {
+ Jr(L, bdslot);
+ }
}
} else {
if (is_trampoline_emitted()) {
- Label skip;
- Condition neg_cond = NegateCondition(cond);
- BranchShort(&skip, neg_cond, rs, rt);
- Jr(L, bdslot);
- bind(&skip);
+ if (cond != cc_always) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jr(L, bdslot);
+ bind(&skip);
+ } else {
+ Jr(L, bdslot);
+ }
} else {
BranchShort(L, cond, rs, rt, bdslot);
}
@@ -1669,13 +1849,14 @@
const Operand& rt,
BranchDelaySlot bdslot) {
BRANCH_ARGS_CHECK(cond, rs, rt);
- ASSERT(!rs.is(zero_reg));
+ DCHECK(!rs.is(zero_reg));
Register r2 = no_reg;
Register scratch = at;
if (rt.is_reg()) {
// NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
// rt.
+ BlockTrampolinePoolScope block_trampoline_pool(this);
r2 = rt.rm_;
switch (cond) {
case cc_always:
@@ -1761,20 +1942,21 @@
// Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching the
// target.
+ BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
b(offset);
break;
case eq:
// We don't want any other register but scratch clobbered.
- ASSERT(!scratch.is(rs));
+ DCHECK(!scratch.is(rs));
r2 = scratch;
li(r2, rt);
beq(rs, r2, offset);
break;
case ne:
// We don't want any other register but scratch clobbered.
- ASSERT(!scratch.is(rs));
+ DCHECK(!scratch.is(rs));
r2 = scratch;
li(r2, rt);
bne(rs, r2, offset);
@@ -1901,10 +2083,11 @@
BranchDelaySlot bdslot) {
BRANCH_ARGS_CHECK(cond, rs, rt);
- int32_t offset;
+ int32_t offset = 0;
Register r2 = no_reg;
Register scratch = at;
if (rt.is_reg()) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
r2 = rt.rm_;
// Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching the
@@ -2011,20 +2194,21 @@
// Be careful to always use shifted_branch_offset only just before the
  // branch instruction, as the location will be remembered for patching the
// target.
+ BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
offset = shifted_branch_offset(L, false);
b(offset);
break;
case eq:
- ASSERT(!scratch.is(rs));
+ DCHECK(!scratch.is(rs));
r2 = scratch;
li(r2, rt);
offset = shifted_branch_offset(L, false);
beq(rs, r2, offset);
break;
case ne:
- ASSERT(!scratch.is(rs));
+ DCHECK(!scratch.is(rs));
r2 = scratch;
li(r2, rt);
offset = shifted_branch_offset(L, false);
@@ -2036,7 +2220,7 @@
offset = shifted_branch_offset(L, false);
bgtz(rs, offset);
} else {
- ASSERT(!scratch.is(rs));
+ DCHECK(!scratch.is(rs));
r2 = scratch;
li(r2, rt);
slt(scratch, r2, rs);
@@ -2053,7 +2237,7 @@
offset = shifted_branch_offset(L, false);
beq(scratch, zero_reg, offset);
} else {
- ASSERT(!scratch.is(rs));
+ DCHECK(!scratch.is(rs));
r2 = scratch;
li(r2, rt);
slt(scratch, rs, r2);
@@ -2070,7 +2254,7 @@
offset = shifted_branch_offset(L, false);
bne(scratch, zero_reg, offset);
} else {
- ASSERT(!scratch.is(rs));
+ DCHECK(!scratch.is(rs));
r2 = scratch;
li(r2, rt);
slt(scratch, rs, r2);
@@ -2083,7 +2267,7 @@
offset = shifted_branch_offset(L, false);
blez(rs, offset);
} else {
- ASSERT(!scratch.is(rs));
+ DCHECK(!scratch.is(rs));
r2 = scratch;
li(r2, rt);
slt(scratch, r2, rs);
@@ -2095,9 +2279,9 @@
case Ugreater:
if (rt.imm32_ == 0) {
offset = shifted_branch_offset(L, false);
- bgtz(rs, offset);
+ bne(rs, zero_reg, offset);
} else {
- ASSERT(!scratch.is(rs));
+ DCHECK(!scratch.is(rs));
r2 = scratch;
li(r2, rt);
sltu(scratch, r2, rs);
@@ -2114,7 +2298,7 @@
offset = shifted_branch_offset(L, false);
beq(scratch, zero_reg, offset);
} else {
- ASSERT(!scratch.is(rs));
+ DCHECK(!scratch.is(rs));
r2 = scratch;
li(r2, rt);
sltu(scratch, rs, r2);
@@ -2131,7 +2315,7 @@
offset = shifted_branch_offset(L, false);
bne(scratch, zero_reg, offset);
} else {
- ASSERT(!scratch.is(rs));
+ DCHECK(!scratch.is(rs));
r2 = scratch;
li(r2, rt);
sltu(scratch, rs, r2);
@@ -2142,9 +2326,9 @@
case Uless_equal:
if (rt.imm32_ == 0) {
offset = shifted_branch_offset(L, false);
- b(offset);
+ beq(rs, zero_reg, offset);
} else {
- ASSERT(!scratch.is(rs));
+ DCHECK(!scratch.is(rs));
r2 = scratch;
li(r2, rt);
sltu(scratch, r2, rs);
@@ -2157,7 +2341,7 @@
}
}
  // Check that the offset actually fits in an int16_t.
- ASSERT(is_int16(offset));
+ DCHECK(is_int16(offset));
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
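Note: the two branch changes in this hunk fix the unsigned comparisons against zero: for an unsigned value, "greater than 0" is just "not equal to 0" (the old bgtz is a signed test and fails for values with the top bit set), and "less than or equal to 0" is "equal to 0" (the old unconditional b was simply wrong). In C++ terms:

  #include <cstdint>

  bool ugreater_zero(uint32_t x)    { return x != 0; }  // bne rs, zero_reg
  bool uless_equal_zero(uint32_t x) { return x == 0; }  // beq rs, zero_reg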
@@ -2247,68 +2431,152 @@
li(r2, rt);
}
- switch (cond) {
- case cc_always:
- bal(offset);
- break;
- case eq:
- bne(rs, r2, 2);
- nop();
- bal(offset);
- break;
- case ne:
- beq(rs, r2, 2);
- nop();
- bal(offset);
- break;
+ if (!IsMipsArchVariant(kMips32r6)) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ switch (cond) {
+ case cc_always:
+ bal(offset);
+ break;
+ case eq:
+ bne(rs, r2, 2);
+ nop();
+ bal(offset);
+ break;
+ case ne:
+ beq(rs, r2, 2);
+ nop();
+ bal(offset);
+ break;
- // Signed comparison.
- case greater:
- slt(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- bgezal(scratch, offset);
- break;
- case greater_equal:
- slt(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- bltzal(scratch, offset);
- break;
- case less:
- slt(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- bgezal(scratch, offset);
- break;
- case less_equal:
- slt(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- bltzal(scratch, offset);
- break;
+ // Signed comparison.
+ case greater:
+ slt(scratch, r2, rs);
+ addiu(scratch, scratch, -1);
+ bgezal(scratch, offset);
+ break;
+ case greater_equal:
+ slt(scratch, rs, r2);
+ addiu(scratch, scratch, -1);
+ bltzal(scratch, offset);
+ break;
+ case less:
+ slt(scratch, rs, r2);
+ addiu(scratch, scratch, -1);
+ bgezal(scratch, offset);
+ break;
+ case less_equal:
+ slt(scratch, r2, rs);
+ addiu(scratch, scratch, -1);
+ bltzal(scratch, offset);
+ break;
- // Unsigned comparison.
- case Ugreater:
- sltu(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- bgezal(scratch, offset);
- break;
- case Ugreater_equal:
- sltu(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- bltzal(scratch, offset);
- break;
- case Uless:
- sltu(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- bgezal(scratch, offset);
- break;
- case Uless_equal:
- sltu(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- bltzal(scratch, offset);
- break;
+ // Unsigned comparison.
+ case Ugreater:
+ sltu(scratch, r2, rs);
+ addiu(scratch, scratch, -1);
+ bgezal(scratch, offset);
+ break;
+ case Ugreater_equal:
+ sltu(scratch, rs, r2);
+ addiu(scratch, scratch, -1);
+ bltzal(scratch, offset);
+ break;
+ case Uless:
+ sltu(scratch, rs, r2);
+ addiu(scratch, scratch, -1);
+ bgezal(scratch, offset);
+ break;
+ case Uless_equal:
+ sltu(scratch, r2, rs);
+ addiu(scratch, scratch, -1);
+ bltzal(scratch, offset);
+ break;
- default:
- UNREACHABLE();
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ switch (cond) {
+ case cc_always:
+ bal(offset);
+ break;
+ case eq:
+ bne(rs, r2, 2);
+ nop();
+ bal(offset);
+ break;
+ case ne:
+ beq(rs, r2, 2);
+ nop();
+ bal(offset);
+ break;
+
+ // Signed comparison.
+ case greater:
+ // rs > rt
+ slt(scratch, r2, rs);
+ beq(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+ case greater_equal:
+ // rs >= rt
+ slt(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+ case less:
+ // rs < r2
+ slt(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+ case less_equal:
+ // rs <= r2
+ slt(scratch, r2, rs);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+
+
+ // Unsigned comparison.
+ case Ugreater:
+ // rs > rt
+ sltu(scratch, r2, rs);
+ beq(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+ case Ugreater_equal:
+ // rs >= rt
+ sltu(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+ case Uless:
+ // rs < r2
+ sltu(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+ case Uless_equal:
+ // rs <= r2
+ sltu(scratch, r2, rs);
+ bne(scratch, zero_reg, 2);
+ nop();
+ bal(offset);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
+
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
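Note: BranchAndLinkShort has to synthesize conditional calls. Pre-r6 it materializes the condition as 0/1, subtracts 1, and uses the sign-testing branch-and-link forms (bgezal/bltzal); r6 removed those, so its path branches over a plain bal instead (the offset 2 skips the delay-slot nop and the bal itself). The pre-r6 trick, modeled in C++:

  #include <cstdint>

  // slt/sltu leaves 0 or 1 in scratch; after addiu -1 the sign encodes
  // the condition, which bgezal can test while linking.
  bool bgezal_taken(bool condition) {
    int32_t scratch = condition ? 1 : 0;  // slt(scratch, ...)
    scratch -= 1;                         // addiu(scratch, scratch, -1)
    return scratch >= 0;                  // bgezal: call iff condition held
  }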
@@ -2329,7 +2597,7 @@
BranchDelaySlot bdslot) {
BRANCH_ARGS_CHECK(cond, rs, rt);
- int32_t offset;
+ int32_t offset = 0;
Register r2 = no_reg;
Register scratch = at;
if (rt.is_reg()) {
@@ -2339,82 +2607,177 @@
li(r2, rt);
}
- switch (cond) {
- case cc_always:
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case eq:
- bne(rs, r2, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case ne:
- beq(rs, r2, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
+ if (!IsMipsArchVariant(kMips32r6)) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ switch (cond) {
+ case cc_always:
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case eq:
+ bne(rs, r2, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case ne:
+ beq(rs, r2, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
- // Signed comparison.
- case greater:
- slt(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bgezal(scratch, offset);
- break;
- case greater_equal:
- slt(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bltzal(scratch, offset);
- break;
- case less:
- slt(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bgezal(scratch, offset);
- break;
- case less_equal:
- slt(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bltzal(scratch, offset);
- break;
+ // Signed comparison.
+ case greater:
+ slt(scratch, r2, rs);
+ addiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bgezal(scratch, offset);
+ break;
+ case greater_equal:
+ slt(scratch, rs, r2);
+ addiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bltzal(scratch, offset);
+ break;
+ case less:
+ slt(scratch, rs, r2);
+ addiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bgezal(scratch, offset);
+ break;
+ case less_equal:
+ slt(scratch, r2, rs);
+ addiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bltzal(scratch, offset);
+ break;
- // Unsigned comparison.
- case Ugreater:
- sltu(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bgezal(scratch, offset);
- break;
- case Ugreater_equal:
- sltu(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bltzal(scratch, offset);
- break;
- case Uless:
- sltu(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bgezal(scratch, offset);
- break;
- case Uless_equal:
- sltu(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- offset = shifted_branch_offset(L, false);
- bltzal(scratch, offset);
- break;
+ // Unsigned comparison.
+ case Ugreater:
+ sltu(scratch, r2, rs);
+ addiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bgezal(scratch, offset);
+ break;
+ case Ugreater_equal:
+ sltu(scratch, rs, r2);
+ addiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bltzal(scratch, offset);
+ break;
+ case Uless:
+ sltu(scratch, rs, r2);
+ addiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bgezal(scratch, offset);
+ break;
+ case Uless_equal:
+ sltu(scratch, r2, rs);
+ addiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bltzal(scratch, offset);
+ break;
- default:
- UNREACHABLE();
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ switch (cond) {
+ case cc_always:
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case eq:
+ bne(rs, r2, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case ne:
+ beq(rs, r2, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+
+ // Signed comparison.
+ case greater:
+ // rs > rt
+ slt(scratch, r2, rs);
+ beq(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case greater_equal:
+ // rs >= rt
+ slt(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case less:
+ // rs < rt
+ slt(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case less_equal:
+ // rs <= rt
+ slt(scratch, r2, rs);
+ bne(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+
+ // Unsigned comparison.
+ case Ugreater:
+ // rs > rt
+ sltu(scratch, r2, rs);
+ beq(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case Ugreater_equal:
+ // rs >= rt
+ sltu(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case Uless:
+ // rs < rt
+ sltu(scratch, rs, r2);
+ bne(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case Uless_equal:
+ // rs <= rt
+ sltu(scratch, r2, rs);
+ bne(scratch, zero_reg, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
// Check that the offset actually fits in an int16_t.
- ASSERT(is_int16(offset));
+ DCHECK(is_int16(offset));
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
@@ -2465,7 +2828,7 @@
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
- ASSERT(!RelocInfo::IsCodeTarget(rmode));
+ DCHECK(!RelocInfo::IsCodeTarget(rmode));
Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
}
@@ -2476,7 +2839,8 @@
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
- ASSERT(RelocInfo::IsCodeTarget(rmode));
+ DCHECK(RelocInfo::IsCodeTarget(rmode));
+ AllowDeferredHandleDereference embedding_raw_address;
Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
}
@@ -2521,7 +2885,7 @@
if (bd == PROTECT)
nop();
- ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
+ DCHECK_EQ(CallSize(target, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
}
@@ -2552,18 +2916,19 @@
positions_recorder()->WriteRecordedPositions();
li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
Call(t9, cond, rs, rt, bd);
- ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
+ DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
}
int MacroAssembler::CallSize(Handle<Code> code,
RelocInfo::Mode rmode,
- unsigned ast_id,
+ TypeFeedbackId ast_id,
Condition cond,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
+ AllowDeferredHandleDereference using_raw_address;
return CallSize(reinterpret_cast<Address>(code.location()),
rmode, cond, rs, rt, bd);
}
@@ -2571,7 +2936,7 @@
void MacroAssembler::Call(Handle<Code> code,
RelocInfo::Mode rmode,
- unsigned ast_id,
+ TypeFeedbackId ast_id,
Condition cond,
Register rs,
const Operand& rt,
@@ -2579,13 +2944,14 @@
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
+ DCHECK(RelocInfo::IsCodeTarget(rmode));
+ if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
SetRecordedAstId(ast_id);
rmode = RelocInfo::CODE_TARGET_WITH_ID;
}
+ AllowDeferredHandleDereference embedding_raw_address;
Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
- ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
+ DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
}
@@ -2655,6 +3021,7 @@
nop();
}
+
void MacroAssembler::DropAndRet(int drop) {
Ret(USE_DELAY_SLOT);
addiu(sp, sp, drop * kPointerSize);
@@ -2728,18 +3095,14 @@
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
void MacroAssembler::DebugBreak() {
PrepareCEntryArgs(0);
PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
- CEntryStub ces(1);
- ASSERT(AllowThisStubCall(&ces));
+ CEntryStub ces(isolate(), 1);
+ DCHECK(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
-#endif // ENABLE_DEBUGGER_SUPPORT
-
// ---------------------------------------------------------------------------
// Exception handling.
@@ -2766,7 +3129,7 @@
// Push the frame pointer, context, state, and code object.
if (kind == StackHandler::JS_ENTRY) {
- ASSERT_EQ(Smi::FromInt(0), 0);
+ DCHECK_EQ(Smi::FromInt(0), 0);
// The second zero_reg indicates no context.
// The first zero_reg is the NULL frame pointer.
// The operands are reversed to match the order of MultiPush/Pop.
@@ -2888,12 +3251,13 @@
}
-void MacroAssembler::AllocateInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
+ DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -2905,37 +3269,36 @@
return;
}
- ASSERT(!result.is(scratch1));
- ASSERT(!result.is(scratch2));
- ASSERT(!scratch1.is(scratch2));
- ASSERT(!scratch1.is(t9));
- ASSERT(!scratch2.is(t9));
- ASSERT(!result.is(t9));
+ DCHECK(!result.is(scratch1));
+ DCHECK(!result.is(scratch2));
+ DCHECK(!scratch1.is(scratch2));
+ DCHECK(!scratch1.is(t9));
+ DCHECK(!scratch2.is(t9));
+ DCHECK(!result.is(t9));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
object_size *= kPointerSize;
}
- ASSERT_EQ(0, object_size & kObjectAlignmentMask);
+ DCHECK_EQ(0, object_size & kObjectAlignmentMask);
// Check relative positions of allocation top and limit addresses.
// ARM adds additional checks to make sure the ldm instruction can be
// used. On MIPS we don't have ldm, so we don't need those checks.
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
intptr_t top =
- reinterpret_cast<intptr_t>(new_space_allocation_top.address());
+ reinterpret_cast<intptr_t>(allocation_top.address());
intptr_t limit =
- reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
- ASSERT((limit - top) == kPointerSize);
+ reinterpret_cast<intptr_t>(allocation_limit.address());
+ DCHECK((limit - top) == kPointerSize);
// Set up allocation top address and object size registers.
Register topaddr = scratch1;
- Register obj_size_reg = scratch2;
- li(topaddr, Operand(new_space_allocation_top));
- li(obj_size_reg, Operand(object_size));
+ li(topaddr, Operand(allocation_top));
// This code stores a temporary value in t9.
if ((flags & RESULT_CONTAINS_TOP) == 0) {
@@ -2948,15 +3311,32 @@
// immediately below, so this use of t9 does not cause a difference in
// register content between debug and release mode.
lw(t9, MemOperand(topaddr));
- Check(eq, "Unexpected allocation top", result, Operand(t9));
+ Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
}
// Load allocation limit into t9. Result already contains allocation top.
lw(t9, MemOperand(topaddr, limit - top));
}
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+ DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+ And(scratch2, result, Operand(kDoubleAlignmentMask));
+ Label aligned;
+ Branch(&aligned, eq, scratch2, Operand(zero_reg));
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ Branch(gc_required, Ugreater_equal, result, Operand(t9));
+ }
+ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+ sw(scratch2, MemOperand(result));
+ Addu(result, result, Operand(kDoubleSize / 2));
+ bind(&aligned);
+ }
+
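
The DOUBLE_ALIGNMENT block added above plants a one-pointer filler object when the allocation top is only pointer-aligned, then advances result by kDoubleSize / 2 so the object proper starts 8-byte aligned. A standalone sketch of that arithmetic, assuming the 32-bit MIPS constants:

#include <cassert>
#include <cstdint>

const uint32_t kPointerSize = 4;
const uint32_t kDoubleSize = 8;
const uint32_t kDoubleAlignmentMask = kDoubleSize - 1;

// Returns where the object will start; *filler says whether a one-pointer
// filler word had to be planted at the old top (kDoubleSize / 2 ==
// kPointerSize here).
uint32_t AlignForDouble(uint32_t top, bool* filler) {
  *filler = (top & kDoubleAlignmentMask) != 0;
  return *filler ? top + kPointerSize : top;
}

int main() {
  bool filler;
  assert(AlignForDouble(0x1000, &filler) == 0x1000 && !filler);
  assert(AlignForDouble(0x1004, &filler) == 0x1008 && filler);
  return 0;
}
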
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top.
- Addu(scratch2, result, Operand(obj_size_reg));
+ Addu(scratch2, result, Operand(object_size));
Branch(gc_required, Ugreater, scratch2, Operand(t9));
sw(scratch2, MemOperand(topaddr));
@@ -2967,12 +3347,12 @@
}
-void MacroAssembler::AllocateInNewSpace(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -2984,28 +3364,28 @@
return;
}
- ASSERT(!result.is(scratch1));
- ASSERT(!result.is(scratch2));
- ASSERT(!scratch1.is(scratch2));
- ASSERT(!object_size.is(t9));
- ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
+ DCHECK(!result.is(scratch1));
+ DCHECK(!result.is(scratch2));
+ DCHECK(!scratch1.is(scratch2));
+ DCHECK(!object_size.is(t9));
+ DCHECK(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
// Check relative positions of allocation top and limit addresses.
// ARM adds additional checks to make sure the ldm instruction can be
// used. On MIPS we don't have ldm, so we don't need those checks.
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
intptr_t top =
- reinterpret_cast<intptr_t>(new_space_allocation_top.address());
+ reinterpret_cast<intptr_t>(allocation_top.address());
intptr_t limit =
- reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
- ASSERT((limit - top) == kPointerSize);
+ reinterpret_cast<intptr_t>(allocation_limit.address());
+ DCHECK((limit - top) == kPointerSize);
// Set up allocation top address and object size registers.
Register topaddr = scratch1;
- li(topaddr, Operand(new_space_allocation_top));
+ li(topaddr, Operand(allocation_top));
// This code stores a temporary value in t9.
if ((flags & RESULT_CONTAINS_TOP) == 0) {
@@ -3018,12 +3398,29 @@
// immediately below, so this use of t9 does not cause a difference in
// register content between debug and release mode.
lw(t9, MemOperand(topaddr));
- Check(eq, "Unexpected allocation top", result, Operand(t9));
+ Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
}
// Load allocation limit into t9. Result already contains allocation top.
lw(t9, MemOperand(topaddr, limit - top));
}
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+ DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+ And(scratch2, result, Operand(kDoubleAlignmentMask));
+ Label aligned;
+ Branch(&aligned, eq, scratch2, Operand(zero_reg));
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ Branch(gc_required, Ugreater_equal, result, Operand(t9));
+ }
+ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+ sw(scratch2, MemOperand(result));
+ Addu(result, result, Operand(kDoubleSize / 2));
+ bind(&aligned);
+ }
+
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
@@ -3038,7 +3435,7 @@
// Update allocation top. result temporarily holds the new top.
if (emit_debug_code()) {
And(t9, scratch2, Operand(kObjectAlignmentMask));
- Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
+ Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
}
sw(scratch2, MemOperand(topaddr));
@@ -3060,7 +3457,7 @@
// Check that the object being un-allocated is below the current top.
li(scratch, Operand(new_space_allocation_top));
lw(scratch, MemOperand(scratch));
- Check(less, "Undo allocation of non allocated memory",
+ Check(less, kUndoAllocationOfNonAllocatedMemory,
object, Operand(scratch));
#endif
// Write the address of the object to un-allocate as the current top.
@@ -3077,19 +3474,19 @@
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
- ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
sll(scratch1, length, 1); // Length in bytes, not chars.
addiu(scratch1, scratch1,
kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate two-byte string in new space.
- AllocateInNewSpace(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
// Set the map, length and hash field.
InitializeNewString(result,
@@ -3100,33 +3497,28 @@
}
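
The size computation in AllocateTwoByteString doubles the length (two bytes per character), adds the header size plus the alignment mask, and masks the low bits off, rounding the allocation up to object alignment. A sketch with an assumed 12-byte header (the real SeqTwoByteString::kHeaderSize is a V8 internal and may differ):

#include <cassert>
#include <cstdint>

const uint32_t kObjectAlignmentMask = 3;           // 4-byte alignment.
const uint32_t kSeqTwoByteStringHeaderSize = 12;   // Assumed for the demo.

uint32_t TwoByteStringSize(uint32_t length) {
  uint32_t bytes = (length << 1)                   // sll(scratch1, length, 1)
                   + kObjectAlignmentMask + kSeqTwoByteStringHeaderSize;
  return bytes & ~kObjectAlignmentMask;            // And(scratch1, ...)
}

int main() {
  assert(TwoByteStringSize(1) == 16);  // 12 + 2, rounded up to 16.
  assert(TwoByteStringSize(2) == 16);
  assert(TwoByteStringSize(3) == 20);
  return 0;
}
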
-void MacroAssembler::AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string
// while observing object alignment.
- ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
- ASSERT(kCharSize == 1);
- addiu(scratch1, length, kObjectAlignmentMask + SeqAsciiString::kHeaderSize);
+ DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ DCHECK(kCharSize == 1);
+ addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
- // Allocate ASCII string in new space.
- AllocateInNewSpace(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ // Allocate one-byte string in new space.
+ Allocate(scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
// Set the map, length and hash field.
- InitializeNewString(result,
- length,
- Heap::kAsciiStringMapRootIndex,
- scratch1,
- scratch2);
+ InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
+ scratch1, scratch2);
}
@@ -3135,12 +3527,8 @@
Register scratch1,
Register scratch2,
Label* gc_required) {
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
InitializeNewString(result,
length,
Heap::kConsStringMapRootIndex,
@@ -3149,22 +3537,19 @@
}
-void MacroAssembler::AllocateAsciiConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
- InitializeNewString(result,
- length,
- Heap::kConsAsciiStringMapRootIndex,
- scratch1,
- scratch2);
+void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ TAG_OBJECT);
+
+ InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
+ scratch1, scratch2);
}
@@ -3173,12 +3558,8 @@
Register scratch1,
Register scratch2,
Label* gc_required) {
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
InitializeNewString(result,
length,
@@ -3188,23 +3569,28 @@
}
-void MacroAssembler::AllocateAsciiSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+void MacroAssembler::AllocateOneByteSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
- InitializeNewString(result,
- length,
- Heap::kSlicedAsciiStringMapRootIndex,
- scratch1,
- scratch2);
+ InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
+ scratch1, scratch2);
+}
+
+
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
+ Label* not_unique_name) {
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ Label succeed;
+ And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+ Branch(&succeed, eq, at, Operand(zero_reg));
+ Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
+
+ bind(&succeed);
}
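
The new JumpIfNotUniqueNameInstanceType accepts internalized strings (both the not-a-string and not-internalized bits clear) and symbols, rejecting everything else. A sketch of the test with made-up bit values; the real masks live in V8's headers:

#include <cassert>
#include <cstdint>

// Assumed bit layout, purely for the demo.
const uint32_t kIsNotStringMask       = 1u << 7;
const uint32_t kIsNotInternalizedMask = 1u << 6;
const uint32_t SYMBOL_TYPE            = (1u << 7) | 5;

bool IsUniqueName(uint32_t instance_type) {
  if ((instance_type & (kIsNotStringMask | kIsNotInternalizedMask)) == 0)
    return true;  // Internalized string: both bits clear.
  return instance_type == SYMBOL_TYPE;  // Otherwise it must be a symbol.
}

int main() {
  assert(IsUniqueName(0));                        // Internalized string.
  assert(IsUniqueName(SYMBOL_TYPE));              // Symbol.
  assert(!IsUniqueName(kIsNotInternalizedMask));  // Ordinary string.
  return 0;
}
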
@@ -3214,19 +3600,25 @@
Register scratch1,
Register scratch2,
Register heap_number_map,
- Label* need_gc) {
+ Label* need_gc,
+ TaggingMode tagging_mode,
+ MutableMode mode) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
- AllocateInNewSpace(HeapNumber::kSize,
- result,
- scratch1,
- scratch2,
- need_gc,
- TAG_OBJECT);
+ Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
+ tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
+
+ Heap::RootListIndex map_index = mode == MUTABLE
+ ? Heap::kMutableHeapNumberMapRootIndex
+ : Heap::kHeapNumberMapRootIndex;
+ AssertIsRoot(heap_number_map, map_index);
// Store heap number map in the allocated object.
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+ if (tagging_mode == TAG_RESULT) {
+ sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+ } else {
+ sw(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
+ }
}
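
The tagged/untagged split above works because FieldMemOperand folds the heap-object tag correction into its displacement while MemOperand does not, so both stores hit the same map slot. A sketch, assuming V8's 32-bit kHeapObjectTag of 1:

#include <cassert>
#include <cstdint>

const int32_t kHeapObjectTag = 1;  // V8's 32-bit heap-object tag.
const int32_t kMapOffset = 0;      // Stand-in for HeapObject::kMapOffset.

// FieldMemOperand subtracts the tag while addressing; MemOperand does not.
int32_t FieldAddress(int32_t tagged_base, int32_t offset) {
  return tagged_base + offset - kHeapObjectTag;
}
int32_t RawAddress(int32_t untagged_base, int32_t offset) {
  return untagged_base + offset;
}

int main() {
  int32_t raw = 0x1000;                   // Untagged allocation result.
  int32_t tagged = raw + kHeapObjectTag;  // TAG_RESULT form of the pointer.
  assert(FieldAddress(tagged, kMapOffset) == RawAddress(raw, kMapOffset));
  return 0;
}
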
@@ -3246,8 +3638,8 @@
Register src,
RegList temps,
int field_count) {
- ASSERT((temps & dst.bit()) == 0);
- ASSERT((temps & src.bit()) == 0);
+ DCHECK((temps & dst.bit()) == 0);
+ DCHECK((temps & src.bit()) == 0);
// Primitive implementation using only one temporary register.
Register tmp = no_reg;
@@ -3258,7 +3650,7 @@
break;
}
}
- ASSERT(!tmp.is(no_reg));
+ DCHECK(!tmp.is(no_reg));
for (int i = 0; i < field_count; i++) {
lw(tmp, FieldMemOperand(src, i * kPointerSize));
@@ -3271,11 +3663,10 @@
Register dst,
Register length,
Register scratch) {
- Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
+ Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
// Align src before copying in word size chunks.
- bind(&align_loop);
- Branch(&done, eq, length, Operand(zero_reg));
+ Branch(&byte_loop, le, length, Operand(kPointerSize));
bind(&align_loop_1);
And(scratch, src, kPointerSize - 1);
Branch(&word_loop, eq, scratch, Operand(zero_reg));
@@ -3284,13 +3675,13 @@
sb(scratch, MemOperand(dst));
Addu(dst, dst, 1);
Subu(length, length, Operand(1));
- Branch(&byte_loop_1, ne, length, Operand(zero_reg));
+ Branch(&align_loop_1, ne, length, Operand(zero_reg));
// Copy bytes in word size chunks.
bind(&word_loop);
if (emit_debug_code()) {
And(scratch, src, kPointerSize - 1);
- Assert(eq, "Expecting alignment for CopyBytes",
+ Assert(eq, kExpectingAlignmentForCopyBytes,
scratch, Operand(zero_reg));
}
Branch(&byte_loop, lt, length, Operand(kPointerSize));
@@ -3299,13 +3690,24 @@
// TODO(kalmard): Check if this can be optimized to use sw in most cases.
// Can't use unaligned access - copy byte by byte.
- sb(scratch, MemOperand(dst, 0));
- srl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 1));
- srl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 2));
- srl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 3));
+ if (kArchEndian == kLittle) {
+ sb(scratch, MemOperand(dst, 0));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 1));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 2));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 3));
+ } else {
+ sb(scratch, MemOperand(dst, 3));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 2));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 1));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 0));
+ }
+
Addu(dst, dst, 4);
Subu(length, length, Operand(kPointerSize));
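
The new if/else keeps CopyBytes endian-correct: the word in scratch was loaded with the host byte order, so on little-endian the low byte is stored first at offset 0 and on big-endian it is stored last at offset 3, producing the same memory image either way. A portable model:

#include <cassert>
#include <cstdint>

void StoreWordBytewise(uint8_t* dst, uint32_t scratch, bool little_endian) {
  if (little_endian) {
    for (int i = 0; i < 4; i++) { dst[i] = scratch & 0xff; scratch >>= 8; }
  } else {
    for (int i = 3; i >= 0; i--) { dst[i] = scratch & 0xff; scratch >>= 8; }
  }
}

int main() {
  uint8_t le[4], be[4];
  StoreWordBytewise(le, 0x11223344, true);
  StoreWordBytewise(be, 0x11223344, false);
  // Little-endian memory order: 44 33 22 11; big-endian: 11 22 33 44.
  assert(le[0] == 0x44 && le[3] == 0x11);
  assert(be[0] == 0x11 && be[3] == 0x44);
  return 0;
}
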
@@ -3341,45 +3743,50 @@
void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
- STATIC_ASSERT(FAST_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue));
+ Branch(fail, hi, scratch,
+ Operand(Map::kMaximumBitField2FastHoleyElementValue));
}
void MacroAssembler::CheckFastObjectElements(Register map,
Register scratch,
Label* fail) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
- STATIC_ASSERT(FAST_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
Branch(fail, ls, scratch,
- Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+ Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
Branch(fail, hi, scratch,
- Operand(Map::kMaximumBitField2FastElementValue));
+ Operand(Map::kMaximumBitField2FastHoleyElementValue));
}
-void MacroAssembler::CheckFastSmiOnlyElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+void MacroAssembler::CheckFastSmiElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
Branch(fail, hi, scratch,
- Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+ Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
}
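
These element checks rely on the renumbered elements kinds: the four fast kinds occupy values 0 through 3, so a single unsigned compare against a maximum-fast constant classifies a map. A simplified sketch that ignores the bit-field shift encoded in the real Map::kMaximumBitField2* constants:

#include <cassert>

enum ElementsKind {
  FAST_SMI_ELEMENTS = 0,
  FAST_HOLEY_SMI_ELEMENTS = 1,
  FAST_ELEMENTS = 2,
  FAST_HOLEY_ELEMENTS = 3,
  DICTIONARY_ELEMENTS = 4  // First kind past the fast range.
};

// CheckFastElements: anything <= FAST_HOLEY_ELEMENTS passes.
bool IsFastElementsKind(unsigned kind) {
  return kind <= FAST_HOLEY_ELEMENTS;
}

// CheckFastSmiElements: only the two smi kinds pass.
bool IsFastSmiElementsKind(unsigned kind) {
  return kind <= FAST_HOLEY_SMI_ELEMENTS;
}

int main() {
  assert(IsFastElementsKind(FAST_HOLEY_ELEMENTS));
  assert(!IsFastElementsKind(DICTIONARY_ELEMENTS));
  assert(IsFastSmiElementsKind(FAST_HOLEY_SMI_ELEMENTS));
  assert(!IsFastSmiElementsKind(FAST_ELEMENTS));
  return 0;
}
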
void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
- Register receiver_reg,
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
- Register scratch4,
- Label* fail) {
+ Label* fail,
+ int elements_offset) {
Label smi_value, maybe_nan, have_double_value, is_nan, done;
Register mantissa_reg = scratch2;
Register exponent_reg = scratch3;
@@ -3405,9 +3812,12 @@
bind(&have_double_value);
sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
Addu(scratch1, scratch1, elements_reg);
- sw(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
- sw(exponent_reg, FieldMemOperand(scratch1, offset));
+ sw(mantissa_reg,
+ FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
+ + kHoleNanLower32Offset));
+ sw(exponent_reg,
+ FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
+ + kHoleNanUpper32Offset));
jmp(&done);
bind(&maybe_nan);
@@ -3418,43 +3828,24 @@
Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
bind(&is_nan);
// Load canonical NaN for storing into the double array.
- uint64_t nan_int64 = BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
- li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
+ LoadRoot(at, Heap::kNanValueRootIndex);
+ lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset));
+ lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset));
jmp(&have_double_value);
bind(&smi_value);
Addu(scratch1, elements_reg,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
+ elements_offset));
sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
Addu(scratch1, scratch1, scratch2);
// scratch1 is now the effective address of the double element.
- FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(FPU)) {
- destination = FloatingPointHelper::kFPURegisters;
- } else {
- destination = FloatingPointHelper::kCoreRegisters;
- }
-
- Register untagged_value = receiver_reg;
+ Register untagged_value = elements_reg;
SmiUntag(untagged_value, value_reg);
- FloatingPointHelper::ConvertIntToDouble(this,
- untagged_value,
- destination,
- f0,
- mantissa_reg,
- exponent_reg,
- scratch4,
- f2);
- if (destination == FloatingPointHelper::kFPURegisters) {
- CpuFeatures::Scope scope(FPU);
- sdc1(f0, MemOperand(scratch1, 0));
- } else {
- sw(mantissa_reg, MemOperand(scratch1, 0));
- sw(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
- }
+ mtc1(untagged_value, f2);
+ cvt_d_w(f0, f2);
+ sdc1(f0, MemOperand(scratch1, 0));
bind(&done);
}
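
The rewritten smi path assumes an FPU is always available (the old FloatingPointHelper fallback is gone), so a smi becomes a double with just SmiUntag, mtc1 and cvt.d.w. The equivalent computation, assuming the 32-bit smi tag size of 1:

#include <cassert>
#include <cstdint>

double SmiToDouble(int32_t smi) {
  int32_t untagged = smi >> 1;           // SmiUntag: shift right by tag size.
  return static_cast<double>(untagged);  // mtc1 + cvt.d.w semantics.
}

int main() {
  assert(SmiToDouble(7 << 1) == 7.0);
  assert(SmiToDouble(-6) == -3.0);  // Smi encoding of -3.
  return 0;
}
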
@@ -3464,31 +3855,18 @@
Handle<Map> map,
Label* early_success,
Condition cond,
- Label* branch_to,
- CompareMapMode mode) {
+ Label* branch_to) {
lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- Operand right = Operand(map);
- if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
- Map* transitioned_fast_element_map(
- map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
- ASSERT(transitioned_fast_element_map == NULL ||
- map->elements_kind() != FAST_ELEMENTS);
- if (transitioned_fast_element_map != NULL) {
- Branch(early_success, eq, scratch, right);
- right = Operand(Handle<Map>(transitioned_fast_element_map));
- }
+ CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
+}
- Map* transitioned_double_map(
- map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
- ASSERT(transitioned_double_map == NULL ||
- map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
- if (transitioned_double_map != NULL) {
- Branch(early_success, eq, scratch, right);
- right = Operand(Handle<Map>(transitioned_double_map));
- }
- }
- Branch(branch_to, cond, scratch, right);
+void MacroAssembler::CompareMapAndBranch(Register obj_map,
+ Handle<Map> map,
+ Label* early_success,
+ Condition cond,
+ Label* branch_to) {
+ Branch(branch_to, cond, obj_map, Operand(map));
}
@@ -3496,13 +3874,12 @@
Register scratch,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode) {
+ SmiCheckType smi_check_type) {
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, fail);
}
Label success;
- CompareMapAndBranch(obj, scratch, map, &success, ne, fail, mode);
+ CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
bind(&success);
}
@@ -3536,68 +3913,77 @@
}
-void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
- CpuFeatures::Scope scope(FPU);
+void MacroAssembler::MovFromFloatResult(DoubleRegister dst) {
if (IsMipsSoftFloatABI) {
- Move(dst, v0, v1);
+ if (kArchEndian == kLittle) {
+ Move(dst, v0, v1);
+ } else {
+ Move(dst, v1, v0);
+ }
} else {
Move(dst, f0); // Reg f0 is o32 ABI FP return value.
}
}
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
- CpuFeatures::Scope scope(FPU);
- if (!IsMipsSoftFloatABI) {
- Move(f12, dreg);
- } else {
- Move(a0, a1, dreg);
- }
-}
-
-
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
- DoubleRegister dreg2) {
- CpuFeatures::Scope scope(FPU);
- if (!IsMipsSoftFloatABI) {
- if (dreg2.is(f12)) {
- ASSERT(!dreg1.is(f14));
- Move(f14, dreg2);
- Move(f12, dreg1);
+void MacroAssembler::MovFromFloatParameter(DoubleRegister dst) {
+ if (IsMipsSoftFloatABI) {
+ if (kArchEndian == kLittle) {
+ Move(dst, a0, a1);
} else {
- Move(f12, dreg1);
- Move(f14, dreg2);
+ Move(dst, a1, a0);
}
} else {
- Move(a0, a1, dreg1);
- Move(a2, a3, dreg2);
+ Move(dst, f12); // Reg f12 is o32 ABI FP first argument value.
}
}
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
- Register reg) {
- CpuFeatures::Scope scope(FPU);
+void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
if (!IsMipsSoftFloatABI) {
- Move(f12, dreg);
- Move(a2, reg);
+ Move(f12, src);
} else {
- Move(a2, reg);
- Move(a0, a1, dreg);
+ if (kArchEndian == kLittle) {
+ Move(a0, a1, src);
+ } else {
+ Move(a1, a0, src);
+ }
}
}
-void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
- // This macro takes the dst register to make the code more readable
- // at the call sites. However, the dst register has to be t1 to
- // follow the calling convention which requires the call type to be
- // in t1.
- ASSERT(dst.is(t1));
- if (call_kind == CALL_AS_FUNCTION) {
- li(dst, Operand(Smi::FromInt(1)));
+void MacroAssembler::MovToFloatResult(DoubleRegister src) {
+ if (!IsMipsSoftFloatABI) {
+ Move(f0, src);
} else {
- li(dst, Operand(Smi::FromInt(0)));
+ if (kArchEndian == kLittle) {
+ Move(v0, v1, src);
+ } else {
+ Move(v1, v0, src);
+ }
+ }
+}
+
+
+void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
+ DoubleRegister src2) {
+ if (!IsMipsSoftFloatABI) {
+ if (src2.is(f12)) {
+ DCHECK(!src1.is(f14));
+ Move(f14, src2);
+ Move(f12, src1);
+ } else {
+ Move(f12, src1);
+ Move(f14, src2);
+ }
+ } else {
+ if (kArchEndian == kLittle) {
+ Move(a0, a1, src1);
+ Move(a2, a3, src2);
+ } else {
+ Move(a1, a0, src1);
+ Move(a3, a2, src2);
+ }
}
}
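
Under the soft-float o32 ABI a double travels in a pair of general registers, and the kArchEndian checks above decide which register of the pair carries the low word. A host-side model of the split, assuming Move(dst, lo, hi) ordering on little-endian:

#include <cassert>
#include <cstdint>
#include <cstring>

void SplitDouble(double d, bool little_endian,
                 uint32_t* first_reg, uint32_t* second_reg) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  uint32_t lo = static_cast<uint32_t>(bits);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);
  // Little-endian: (a0, a1) = (lo, hi); big-endian: (a0, a1) = (hi, lo).
  *first_reg  = little_endian ? lo : hi;
  *second_reg = little_endian ? hi : lo;
}

int main() {
  uint32_t r0, r1;
  SplitDouble(1.0, true, &r0, &r1);
  assert(r0 == 0 && r1 == 0x3ff00000);  // IEEE-754 bits of 1.0.
  return 0;
}
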
@@ -3612,8 +3998,7 @@
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
bool definitely_matches = false;
*definitely_mismatches = false;
Label regular_invoke;
@@ -3623,17 +4008,16 @@
// a0: actual arguments count
// a1: function (passed through to callee)
// a2: expected arguments count
- // a3: callee code entry
// The code below is made a lot easier because the calling code already sets
// up actual and expected registers according to the contract if values are
// passed in registers.
- ASSERT(actual.is_immediate() || actual.reg().is(a0));
- ASSERT(expected.is_immediate() || expected.reg().is(a2));
- ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
+ DCHECK(actual.is_immediate() || actual.reg().is(a0));
+ DCHECK(expected.is_immediate() || expected.reg().is(a2));
+ DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
if (expected.is_immediate()) {
- ASSERT(actual.is_immediate());
+ DCHECK(actual.is_immediate());
if (expected.immediate() == actual.immediate()) {
definitely_matches = true;
} else {
@@ -3667,14 +4051,12 @@
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(adaptor));
- SetCallKind(t1, call_kind);
Call(adaptor);
call_wrapper.AfterCall();
if (!*definitely_mismatches) {
Branch(done);
}
} else {
- SetCallKind(t1, call_kind);
Jump(adaptor, RelocInfo::CODE_TARGET);
}
bind(&regular_invoke);
@@ -3686,26 +4068,23 @@
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
Label done;
bool definitely_mismatches = false;
InvokePrologue(expected, actual, Handle<Code>::null(), code,
&done, &definitely_mismatches, flag,
- call_wrapper, call_kind);
+ call_wrapper);
if (!definitely_mismatches) {
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(t1, call_kind);
Call(code);
call_wrapper.AfterCall();
} else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(t1, call_kind);
+ DCHECK(flag == JUMP_FUNCTION);
Jump(code);
}
// Continue here if InvokePrologue does handle the invocation due to
@@ -3715,46 +4094,15 @@
}
-void MacroAssembler::InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- Label done;
-
- bool definitely_mismatches = false;
- InvokePrologue(expected, actual, code, no_reg,
- &done, &definitely_mismatches, flag,
- NullCallWrapper(), call_kind);
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- SetCallKind(t1, call_kind);
- Call(code, rmode);
- } else {
- SetCallKind(t1, call_kind);
- Jump(code, rmode);
- }
- // Continue here if InvokePrologue does handle the invocation due to
- // mismatched parameter counts.
- bind(&done);
- }
-}
-
-
void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
// Contract with called JS functions requires that function is passed in a1.
- ASSERT(function.is(a1));
+ DCHECK(function.is(a1));
Register expected_reg = a2;
Register code_reg = a3;
@@ -3767,28 +4115,39 @@
lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
+ InvokeCode(code_reg, expected, actual, flag, call_wrapper);
}
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+void MacroAssembler::InvokeFunction(Register function,
+ const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
+
+ // Contract with called JS functions requires that function is passed in a1.
+ DCHECK(function.is(a1));
// Get the function and setup the context.
- LoadHeapObject(a1, function);
lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- ParameterCount expected(function->shared()->formal_parameter_count());
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- InvokeCode(a3, expected, actual, flag, call_wrapper, call_kind);
+ InvokeCode(a3, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ li(a1, function);
+ InvokeFunction(a1, expected, actual, flag, call_wrapper);
}
@@ -3813,7 +4172,7 @@
void MacroAssembler::IsObjectJSStringType(Register object,
Register scratch,
Label* fail) {
- ASSERT(kNotStringTag != 0);
+ DCHECK(kNotStringTag != 0);
lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
@@ -3822,6 +4181,15 @@
}
+void MacroAssembler::IsObjectNameType(Register object,
+ Register scratch,
+ Label* fail) {
+ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
+}
+
+
// ---------------------------------------------------------------------------
// Support functions.
@@ -3831,14 +4199,15 @@
Register scratch,
Label* miss,
bool miss_on_bound_function) {
- // Check that the receiver isn't a smi.
- JumpIfSmi(function, miss);
-
- // Check that the function really is a function. Load map into result reg.
- GetObjectType(function, result, scratch);
- Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
-
+ Label non_instance;
if (miss_on_bound_function) {
+ // Check that the receiver isn't a smi.
+ JumpIfSmi(function, miss);
+
+ // Check that the function really is a function. Load map into result reg.
+ GetObjectType(function, result, scratch);
+ Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
+
lw(scratch,
FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
lw(scratch,
@@ -3846,13 +4215,12 @@
And(scratch, scratch,
Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
Branch(miss, ne, scratch, Operand(zero_reg));
- }
- // Make sure that the function has an instance prototype.
- Label non_instance;
- lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
- And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
- Branch(&non_instance, ne, scratch, Operand(zero_reg));
+ // Make sure that the function has an instance prototype.
+ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
+ Branch(&non_instance, ne, scratch, Operand(zero_reg));
+ }
// Get the prototype or initial map from the function.
lw(result,
@@ -3871,12 +4239,15 @@
// Get the prototype from the initial map.
lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
- jmp(&done);
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- bind(&non_instance);
- lw(result, FieldMemOperand(result, Map::kConstructorOffset));
+ if (miss_on_bound_function) {
+ jmp(&done);
+
+ // Non-instance prototype: Fetch prototype from constructor field
+ // in initial map.
+ bind(&non_instance);
+ lw(result, FieldMemOperand(result, Map::kConstructorOffset));
+ }
// All done.
bind(&done);
@@ -3895,18 +4266,23 @@
// Runtime calls.
void MacroAssembler::CallStub(CodeStub* stub,
+ TypeFeedbackId ast_id,
Condition cond,
Register r1,
const Operand& r2,
BranchDelaySlot bd) {
- ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2, bd);
+ DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
+ cond, r1, r2, bd);
}
-void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
- Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
+void MacroAssembler::TailCallStub(CodeStub* stub,
+ Condition cond,
+ Register r1,
+ const Operand& r2,
+ BranchDelaySlot bd) {
+ Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
}
@@ -3915,18 +4291,38 @@
}
-void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
- int stack_space) {
+void MacroAssembler::CallApiFunctionAndReturn(
+ Register function_address,
+ ExternalReference thunk_ref,
+ int stack_space,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand) {
ExternalReference next_address =
- ExternalReference::handle_scope_next_address();
+ ExternalReference::handle_scope_next_address(isolate());
const int kNextOffset = 0;
const int kLimitOffset = AddressOffset(
- ExternalReference::handle_scope_limit_address(),
+ ExternalReference::handle_scope_limit_address(isolate()),
next_address);
const int kLevelOffset = AddressOffset(
- ExternalReference::handle_scope_level_address(),
+ ExternalReference::handle_scope_level_address(isolate()),
next_address);
+ DCHECK(function_address.is(a1) || function_address.is(a2));
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ li(t9, Operand(ExternalReference::is_profiling_address(isolate())));
+ lb(t9, MemOperand(t9, 0));
+ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
+
+ // Additional parameter is the address of the actual callback.
+ li(t9, Operand(thunk_ref));
+ jmp(&end_profiler_check);
+
+ bind(&profiler_disabled);
+ mov(t9, function_address);
+ bind(&end_profiler_check);
+
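
The profiler check just emitted reads a byte-sized is-profiling flag and routes the call through the logging thunk only when it is set; otherwise t9 receives the raw callback address. In miniature:

#include <cassert>
#include <cstdint>

typedef uintptr_t Target;

// Models the generated code: lb of the flag, then either the thunk (which
// logs and forwards) or the raw callback goes into t9.
Target SelectCallTarget(uint8_t is_profiling, Target thunk, Target callback) {
  return is_profiling ? thunk : callback;
}

int main() {
  assert(SelectCallTarget(0, 0x1000, 0x2000) == 0x2000);
  assert(SelectCallTarget(1, 0x1000, 0x2000) == 0x1000);
  return 0;
}
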
// Allocate HandleScope in callee-save registers.
li(s3, Operand(next_address));
lw(s0, MemOperand(s3, kNextOffset));
@@ -3935,41 +4331,46 @@
Addu(s2, s2, Operand(1));
sw(s2, MemOperand(s3, kLevelOffset));
- // The O32 ABI requires us to pass a pointer in a0 where the returned struct
- // (4 bytes) will be placed. This is also built into the Simulator.
- // Set up the pointer to the returned value (a0). It was allocated in
- // EnterExitFrame.
- addiu(a0, fp, ExitFrameConstants::kStackSpaceOffset);
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(1, a0);
+ li(a0, Operand(ExternalReference::isolate_address(isolate())));
+ CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
// Native call returns to the DirectCEntry stub which redirects to the
// return address pushed on stack (could have moved after GC).
// DirectCEntry stub itself is generated early and never moves.
- DirectCEntryStub stub;
- stub.GenerateCall(this, function);
+ DirectCEntryStub stub(isolate());
+ stub.GenerateCall(this, t9);
- // As mentioned above, on MIPS a pointer is returned - we need to dereference
- // it to get the actual return value (which is also a pointer).
- lw(v0, MemOperand(v0));
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(1, a0);
+ li(a0, Operand(ExternalReference::isolate_address(isolate())));
+ CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
Label promote_scheduled_exception;
+ Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
+ Label return_value_loaded;
- // If result is non-zero, dereference to get the result value
- // otherwise set it to undefined.
- Label skip;
- LoadRoot(a0, Heap::kUndefinedValueRootIndex);
- Branch(&skip, eq, v0, Operand(zero_reg));
- lw(a0, MemOperand(v0));
- bind(&skip);
- mov(v0, a0);
+ // Load value from ReturnValue.
+ lw(v0, return_value_operand);
+ bind(&return_value_loaded);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
sw(s0, MemOperand(s3, kNextOffset));
if (emit_debug_code()) {
lw(a1, MemOperand(s3, kLevelOffset));
- Check(eq, "Unexpected level after return from api call", a1, Operand(s2));
+ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
}
Subu(s2, s2, Operand(1));
sw(s2, MemOperand(s3, kLevelOffset));
@@ -3982,14 +4383,23 @@
li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
lw(t1, MemOperand(at));
Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
+ bind(&exception_handled);
+
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ lw(cp, *context_restore_operand);
+ }
li(s0, Operand(stack_space));
- LeaveExitFrame(false, s0, true);
+ LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN);
bind(&promote_scheduled_exception);
- TailCallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()),
- 0,
- 1);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+ 0);
+ }
+ jmp(&exception_handled);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
@@ -3997,7 +4407,7 @@
mov(s0, v0);
mov(a0, v0);
PrepareCallCFunction(1, s1);
- li(a0, Operand(ExternalReference::isolate_address()));
+ li(a0, Operand(ExternalReference::isolate_address(isolate())));
CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
1);
mov(v0, s0);
@@ -4006,32 +4416,18 @@
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
+ return has_frame_ || !stub->SometimesSetsUpAFrame();
}
-void MacroAssembler::IllegalOperation(int num_arguments) {
- if (num_arguments > 0) {
- addiu(sp, sp, num_arguments * kPointerSize);
- }
- LoadRoot(v0, Heap::kUndefinedValueRootIndex);
-}
-
-
-void MacroAssembler::IndexFromHash(Register hash,
- Register index) {
+void MacroAssembler::IndexFromHash(Register hash, Register index) {
// If the hash field contains an array index, pick it out. The assert checks
// that the constants for the maximum number of digits for an array index
// cached in the hash field and the number of bits reserved for it do not
// conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
- // the low kHashShift bits.
- STATIC_ASSERT(kSmiTag == 0);
- Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
- sll(index, hash, kSmiTagSize);
+ DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
}
@@ -4086,18 +4482,18 @@
Register right,
Register overflow_dst,
Register scratch) {
- ASSERT(!dst.is(overflow_dst));
- ASSERT(!dst.is(scratch));
- ASSERT(!overflow_dst.is(scratch));
- ASSERT(!overflow_dst.is(left));
- ASSERT(!overflow_dst.is(right));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!dst.is(scratch));
+ DCHECK(!overflow_dst.is(scratch));
+ DCHECK(!overflow_dst.is(left));
+ DCHECK(!overflow_dst.is(right));
if (left.is(right) && dst.is(left)) {
- ASSERT(!dst.is(t9));
- ASSERT(!scratch.is(t9));
- ASSERT(!left.is(t9));
- ASSERT(!right.is(t9));
- ASSERT(!overflow_dst.is(t9));
+ DCHECK(!dst.is(t9));
+ DCHECK(!scratch.is(t9));
+ DCHECK(!left.is(t9));
+ DCHECK(!right.is(t9));
+ DCHECK(!overflow_dst.is(t9));
mov(t9, right);
right = t9;
}
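
The overflow helpers in this hunk (AdduAndCheckForOverflow and its Subu twin) compute overflow into a register rather than branching. The classic identity behind that style of check: after dst = left + right, the expression (dst ^ left) & (dst ^ right) is negative exactly when the signed addition overflowed. A standalone check of the identity (a sketch of the idea, not the emitted instruction sequence):

#include <cassert>
#include <cstdint>

bool AddOverflows(int32_t left, int32_t right) {
  int32_t dst = static_cast<int32_t>(
      static_cast<uint32_t>(left) + static_cast<uint32_t>(right));
  // Sign bit set in (dst ^ left) & (dst ^ right) iff signed overflow.
  return ((dst ^ left) & (dst ^ right)) < 0;
}

int main() {
  assert(AddOverflows(INT32_MAX, 1));
  assert(AddOverflows(INT32_MIN, -1));
  assert(!AddOverflows(123, 456));
  return 0;
}
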
@@ -4128,13 +4524,13 @@
Register right,
Register overflow_dst,
Register scratch) {
- ASSERT(!dst.is(overflow_dst));
- ASSERT(!dst.is(scratch));
- ASSERT(!overflow_dst.is(scratch));
- ASSERT(!overflow_dst.is(left));
- ASSERT(!overflow_dst.is(right));
- ASSERT(!scratch.is(left));
- ASSERT(!scratch.is(right));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!dst.is(scratch));
+ DCHECK(!overflow_dst.is(scratch));
+ DCHECK(!overflow_dst.is(left));
+ DCHECK(!overflow_dst.is(right));
+ DCHECK(!scratch.is(left));
+ DCHECK(!scratch.is(right));
// This happens with some crankshaft code. Since Subu works fine if
// left == right, let's not make that restriction here.
@@ -4166,16 +4562,14 @@
void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
+ int num_arguments,
+ SaveFPRegsMode save_doubles) {
// All parameters are on the stack. v0 has the return value after call.
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- return;
- }
+ CHECK(f->nargs < 0 || f->nargs == num_arguments);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
@@ -4183,33 +4577,19 @@
// smarter.
PrepareCEntryArgs(num_arguments);
PrepareCEntryFunction(ExternalReference(f, isolate()));
- CEntryStub stub(1);
+ CEntryStub stub(isolate(), 1, save_doubles);
CallStub(&stub);
}
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- PrepareCEntryArgs(function->nargs);
- PrepareCEntryFunction(ExternalReference(function, isolate()));
- CEntryStub stub(1, kSaveFPRegs);
- CallStub(&stub);
-}
-
-
-void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(fid), num_arguments);
-}
-
-
void MacroAssembler::CallExternalReference(const ExternalReference& ext,
int num_arguments,
BranchDelaySlot bd) {
PrepareCEntryArgs(num_arguments);
PrepareCEntryFunction(ext);
- CEntryStub stub(1);
- CallStub(&stub, al, zero_reg, Operand(zero_reg), bd);
+ CEntryStub stub(isolate(), 1);
+ CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
}
@@ -4237,7 +4617,7 @@
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
BranchDelaySlot bd) {
PrepareCEntryFunction(builtin);
- CEntryStub stub(1);
+ CEntryStub stub(isolate(), 1);
Jump(stub.GetCode(),
RelocInfo::CODE_TARGET,
al,
@@ -4251,17 +4631,15 @@
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a builtin without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
GetBuiltinEntry(t9, id);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(t9));
- SetCallKind(t1, CALL_AS_METHOD);
Call(t9);
call_wrapper.AfterCall();
} else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(t1, CALL_AS_METHOD);
+ DCHECK(flag == JUMP_FUNCTION);
Jump(t9);
}
}
@@ -4270,7 +4648,7 @@
void MacroAssembler::GetBuiltinFunction(Register target,
Builtins::JavaScript id) {
// Load the builtins object into target register.
- lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
// Load the JavaScript builtin function from the builtins object.
lw(target, FieldMemOperand(target,
@@ -4279,7 +4657,7 @@
void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- ASSERT(!target.is(a1));
+ DCHECK(!target.is(a1));
GetBuiltinFunction(a1, id);
// Load the code entry point from the builtins object.
lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
@@ -4298,7 +4676,7 @@
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
- ASSERT(value > 0);
+ DCHECK(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
li(scratch2, Operand(ExternalReference(counter)));
lw(scratch1, MemOperand(scratch2));
@@ -4310,7 +4688,7 @@
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
- ASSERT(value > 0);
+ DCHECK(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
li(scratch2, Operand(ExternalReference(counter)));
lw(scratch1, MemOperand(scratch2));
@@ -4323,25 +4701,16 @@
// -----------------------------------------------------------------------------
// Debugging.
-void MacroAssembler::Assert(Condition cc, const char* msg,
+void MacroAssembler::Assert(Condition cc, BailoutReason reason,
Register rs, Operand rt) {
if (emit_debug_code())
- Check(cc, msg, rs, rt);
-}
-
-
-void MacroAssembler::AssertRegisterIsRoot(Register reg,
- Heap::RootListIndex index) {
- if (emit_debug_code()) {
- LoadRoot(at, index);
- Check(eq, "Register did not match expected root", reg, Operand(at));
- }
+ Check(cc, reason, rs, rt);
}
void MacroAssembler::AssertFastElements(Register elements) {
if (emit_debug_code()) {
- ASSERT(!elements.is(at));
+ DCHECK(!elements.is(at));
Label ok;
push(elements);
lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
@@ -4351,53 +4720,49 @@
Branch(&ok, eq, elements, Operand(at));
LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
Branch(&ok, eq, elements, Operand(at));
- Abort("JSObject with fast elements map has slow elements");
+ Abort(kJSObjectWithFastElementsMapHasSlowElements);
bind(&ok);
pop(elements);
}
}
-void MacroAssembler::Check(Condition cc, const char* msg,
+void MacroAssembler::Check(Condition cc, BailoutReason reason,
Register rs, Operand rt) {
Label L;
Branch(&L, cc, rs, rt);
- Abort(msg);
+ Abort(reason);
// Will not return here.
bind(&L);
}
-void MacroAssembler::Abort(const char* msg) {
+void MacroAssembler::Abort(BailoutReason reason) {
Label abort_start;
bind(&abort_start);
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
}
+
+ if (FLAG_trap_on_abort) {
+ stop(msg);
+ return;
+ }
#endif
- li(a0, Operand(p0));
- push(a0);
- li(a0, Operand(Smi::FromInt(p1 - p0)));
+ li(a0, Operand(Smi::FromInt(reason)));
push(a0);
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
} else {
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
}
// Will not return here.
if (is_trampoline_pool_blocked()) {
@@ -4405,10 +4770,10 @@
// instructions generated, we insert padding here to keep the size
// of the Abort macro constant.
// Currently in debug mode with debug_code enabled the number of
- // generated instructions is 14, so we use this as a maximum value.
- static const int kExpectedAbortInstructions = 14;
+ // generated instructions is 10, so we use this as a maximum value.
+ static const int kExpectedAbortInstructions = 10;
int abort_instructions = InstructionsGeneratedSince(&abort_start);
- ASSERT(abort_instructions <= kExpectedAbortInstructions);
+ DCHECK(abort_instructions <= kExpectedAbortInstructions);
while (abort_instructions++ < kExpectedAbortInstructions) {
nop();
}
@@ -4439,46 +4804,34 @@
Register scratch,
Label* no_map_match) {
// Load the global or builtins object from the current context.
- lw(scratch, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
+ lw(scratch,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
// Check that the function's map is the same as the expected cached map.
- int expected_index =
- Context::GetContextMapIndexFromElementsKind(expected_kind);
- lw(at, MemOperand(scratch, Context::SlotOffset(expected_index)));
+ lw(scratch,
+ MemOperand(scratch,
+ Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
+ size_t offset = expected_kind * kPointerSize +
+ FixedArrayBase::kHeaderSize;
+ lw(at, FieldMemOperand(scratch, offset));
Branch(no_map_match, ne, map_in_out, Operand(at));
// Use the transitioned cached map.
- int trans_index =
- Context::GetContextMapIndexFromElementsKind(transitioned_kind);
- lw(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index)));
-}
-
-
-void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch, Register map_out) {
- ASSERT(!function_in.is(map_out));
- Label done;
- lw(map_out, FieldMemOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
- if (!FLAG_smi_only_arrays) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
- FAST_ELEMENTS,
- map_out,
- scratch,
- &done);
- }
- bind(&done);
+ offset = transitioned_kind * kPointerSize +
+ FixedArrayBase::kHeaderSize;
+ lw(map_in_out, FieldMemOperand(scratch, offset));
}
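The new lookup indexes the JS_ARRAY_MAPS_INDEX FixedArray directly, so the entry address is plain header-plus-index arithmetic. A host-side sketch, assuming 32-bit pointers and the usual two-word (map, length) FixedArray header:

#include <cstddef>

const size_t kPointerSize = 4;                          // 32-bit MIPS.
const size_t kFixedArrayHeaderSize = 2 * kPointerSize;  // Map + length.

// Mirrors 'kind * kPointerSize + FixedArrayBase::kHeaderSize' above.
size_t CachedArrayMapOffset(int elements_kind) {
  return elements_kind * kPointerSize + kFixedArrayHeaderSize;
}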
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
- lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
+ lw(function,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the native context from the global or builtins object.
lw(function, FieldMemOperand(function,
- GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
+ GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
lw(function, MemOperand(function, Context::SlotOffset(index)));
}
@@ -4493,12 +4846,47 @@
CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
Branch(&ok);
bind(&fail);
- Abort("Global functions must have initial map");
+ Abort(kGlobalFunctionsMustHaveInitialMap);
bind(&ok);
}
}
+void MacroAssembler::StubPrologue() {
+ Push(ra, fp, cp);
+ Push(Smi::FromInt(StackFrame::STUB));
+ // Adjust FP to point to saved FP.
+ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+}
+
+
+void MacroAssembler::Prologue(bool code_pre_aging) {
+ PredictableCodeSizeScope predictable_code_size_scope(
+ this, kNoCodeAgeSequenceLength);
+ // The following three instructions must remain together and unmodified
+ // for code aging to work properly.
+ if (code_pre_aging) {
+ // Pre-age the code.
+ Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
+ nop(Assembler::CODE_AGE_MARKER_NOP);
+ // Load the stub address to t9 and call it.
+ // GetCodeAgeAndParity() extracts the stub address from this instruction.
+ li(t9,
+ Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
+ CONSTANT_SIZE);
+ nop(); // Prevent jalr to jal optimization.
+ jalr(t9, a0);
+ nop(); // Branch delay slot nop.
+ nop(); // Pad the empty space.
+ } else {
+ Push(ra, fp, cp, a1);
+ nop(Assembler::CODE_AGE_SEQUENCE_NOP);
+ // Adjust fp to point to caller's fp.
+ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ }
+}
+
+
void MacroAssembler::EnterFrame(StackFrame::Type type) {
addiu(sp, sp, -5 * kPointerSize);
li(t8, Operand(Smi::FromInt(type)));
@@ -4508,7 +4896,9 @@
sw(cp, MemOperand(sp, 2 * kPointerSize));
sw(t8, MemOperand(sp, 1 * kPointerSize));
sw(t9, MemOperand(sp, 0 * kPointerSize));
- addiu(fp, sp, 3 * kPointerSize);
+ // Adjust FP to point to saved FP.
+ Addu(fp, sp,
+ Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
}
@@ -4559,15 +4949,15 @@
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
if (save_doubles) {
// The stack must be aligned to 0 modulo 8 for stores with sdc1.
- ASSERT(kDoubleSize == frame_alignment);
+ DCHECK(kDoubleSize == frame_alignment);
if (frame_alignment > 0) {
- ASSERT(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
And(sp, sp, Operand(-frame_alignment)); // Align stack.
}
- int space = FPURegister::kNumRegisters * kDoubleSize;
+ int space = FPURegister::kMaxNumRegisters * kDoubleSize;
Subu(sp, sp, Operand(space));
// Remember: we only need to save every 2nd double FPU value.
- for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
+ for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
FPURegister reg = FPURegister::from_code(i);
sdc1(reg, MemOperand(sp, i * kDoubleSize));
}
@@ -4576,10 +4966,10 @@
// Reserve space for the return address, the requested stack space, and an
// optional slot (used by the DirectCEntryStub to hold the return value if
// a struct is returned), then align the frame in preparation for calling
// the runtime function.
- ASSERT(stack_space >= 0);
+ DCHECK(stack_space >= 0);
Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
if (frame_alignment > 0) {
- ASSERT(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
And(sp, sp, Operand(-frame_alignment)); // Align stack.
}
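And(sp, sp, Operand(-frame_alignment)) rounds the stack pointer down because negating a power of two yields a mask whose low bits are clear. A sketch of the identity:

#include <cassert>
#include <cstdint>

// Round 'value' down to a power-of-two 'alignment' boundary.
uintptr_t AlignDown(uintptr_t value, uintptr_t alignment) {
  assert((alignment & (alignment - 1)) == 0);  // Power of two.
  // ~(alignment - 1) equals -alignment in two's complement.
  return value & ~(alignment - 1);
}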
@@ -4592,12 +4982,13 @@
void MacroAssembler::LeaveExitFrame(bool save_doubles,
Register argument_count,
+ bool restore_context,
bool do_return) {
// Optionally restore all double registers.
if (save_doubles) {
// Remember: we only need to restore every 2nd double FPU value.
lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
- for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
+ for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
FPURegister reg = FPURegister::from_code(i);
ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
}
@@ -4608,9 +4999,12 @@
sw(zero_reg, MemOperand(t8));
// Restore current context from top and clear it in debug mode.
- li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
- lw(cp, MemOperand(t8));
+ if (restore_context) {
+ li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ lw(cp, MemOperand(t8));
+ }
#ifdef DEBUG
+ li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
sw(a3, MemOperand(t8));
#endif
@@ -4647,19 +5041,19 @@
int MacroAssembler::ActivationFrameAlignment() {
-#if defined(V8_HOST_ARCH_MIPS)
+#if V8_HOST_ARCH_MIPS
// Running on the real platform. Use the alignment as mandated by the local
// environment.
// Note: This will break if we ever start generating snapshots on one Mips
// platform for another Mips platform with a different alignment.
- return OS::ActivationFrameAlignment();
-#else // defined(V8_HOST_ARCH_MIPS)
+ return base::OS::ActivationFrameAlignment();
+#else // V8_HOST_ARCH_MIPS
// If we are using the simulator then we should always align to the expected
// alignment. As the simulator is used to generate snapshots we do not know
// if the target platform will need alignment, so this is controlled from a
// flag.
return FLAG_sim_stack_alignment;
-#endif // defined(V8_HOST_ARCH_MIPS)
+#endif // V8_HOST_ARCH_MIPS
}
@@ -4670,7 +5064,7 @@
if (frame_alignment > kPointerSize) {
Label alignment_as_expected;
- ASSERT(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
andi(at, sp, frame_alignment_mask);
Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
// Don't use Check here, as it will call Runtime_Abort re-entering here.
@@ -4694,7 +5088,7 @@
void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
- ASSERT(!reg.is(overflow));
+ DCHECK(!reg.is(overflow));
mov(overflow, reg); // Save original value.
SmiTag(reg);
xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
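The XOR trick works because Smi tagging is a doubling: it overflows a 32-bit register exactly when value and 2 * value disagree in their sign bits. A host-side sketch of the predicate the generated code computes:

#include <cstdint>

bool SmiTagOverflows(int32_t value) {
  // Shift through uint32_t to sidestep signed-overflow UB in C++.
  int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
  return (value ^ tagged) < 0;  // Sign bit set iff the signs differ.
}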
@@ -4708,9 +5102,9 @@
// Fall back to slower case.
SmiTagCheckOverflow(dst, overflow);
} else {
- ASSERT(!dst.is(src));
- ASSERT(!dst.is(overflow));
- ASSERT(!src.is(overflow));
+ DCHECK(!dst.is(src));
+ DCHECK(!dst.is(overflow));
+ DCHECK(!src.is(overflow));
SmiTag(dst, src);
xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
}
@@ -4736,7 +5130,7 @@
Label* smi_label,
Register scratch,
BranchDelaySlot bd) {
- ASSERT_EQ(0, kSmiTag);
+ DCHECK_EQ(0, kSmiTag);
andi(scratch, value, kSmiTagMask);
Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
}
@@ -4745,7 +5139,7 @@
Label* not_smi_label,
Register scratch,
BranchDelaySlot bd) {
- ASSERT_EQ(0, kSmiTag);
+ DCHECK_EQ(0, kSmiTag);
andi(scratch, value, kSmiTagMask);
Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
}
@@ -4755,7 +5149,7 @@
Register reg2,
Label* on_not_both_smi) {
STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(1, kSmiTagMask);
+ DCHECK_EQ(1, kSmiTagMask);
or_(at, reg1, reg2);
JumpIfNotSmi(at, on_not_both_smi);
}
@@ -4765,45 +5159,82 @@
Register reg2,
Label* on_either_smi) {
STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(1, kSmiTagMask);
+ DCHECK_EQ(1, kSmiTagMask);
// The AND of the tags is 0 (a Smi) unless both tags are 1 (not Smi).
and_(at, reg1, reg2);
JumpIfSmi(at, on_either_smi);
}
-void MacroAssembler::AbortIfSmi(Register object) {
- STATIC_ASSERT(kSmiTag == 0);
- andi(at, object, kSmiTagMask);
- Assert(ne, "Operand is a smi", at, Operand(zero_reg));
+void MacroAssembler::AssertNotSmi(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ andi(at, object, kSmiTagMask);
+ Check(ne, kOperandIsASmi, at, Operand(zero_reg));
+ }
}
-void MacroAssembler::AbortIfNotSmi(Register object) {
- STATIC_ASSERT(kSmiTag == 0);
- andi(at, object, kSmiTagMask);
- Assert(eq, "Operand is a smi", at, Operand(zero_reg));
+void MacroAssembler::AssertSmi(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ andi(at, object, kSmiTagMask);
+ Check(eq, kOperandIsASmi, at, Operand(zero_reg));
+ }
}
-void MacroAssembler::AbortIfNotString(Register object) {
- STATIC_ASSERT(kSmiTag == 0);
- And(t0, object, Operand(kSmiTagMask));
- Assert(ne, "Operand is not a string", t0, Operand(zero_reg));
- push(object);
- lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
- lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
- Assert(lo, "Operand is not a string", object, Operand(FIRST_NONSTRING_TYPE));
- pop(object);
+void MacroAssembler::AssertString(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t0);
+ Check(ne, kOperandIsASmiAndNotAString, t0, Operand(zero_reg));
+ push(object);
+ lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
+ Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE));
+ pop(object);
+ }
}
-void MacroAssembler::AbortIfNotRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message) {
- ASSERT(!src.is(at));
- LoadRoot(at, root_value_index);
- Assert(eq, message, src, Operand(at));
+void MacroAssembler::AssertName(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t0);
+ Check(ne, kOperandIsASmiAndNotAName, t0, Operand(zero_reg));
+ push(object);
+ lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
+ Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE));
+ pop(object);
+ }
+}
+
+
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
+ Register scratch) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ Branch(&done_checking, eq, object, Operand(scratch));
+ push(object);
+ lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
+ Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch));
+ pop(object);
+ bind(&done_checking);
+ }
+}
+
+
+void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
+ if (emit_debug_code()) {
+ DCHECK(!reg.is(at));
+ LoadRoot(at, index);
+ Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
+ }
}
@@ -4812,74 +5243,144 @@
Register scratch,
Label* on_not_heap_number) {
lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
}
-void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found) {
+ // Register usage: result serves as a temporary (the cache pointer) until
+ // the final load.
+ Register number_string_cache = result;
+ Register mask = scratch3;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
+ // Divide length by two (length is a smi).
+ sra(mask, mask, kSmiTagSize + 1);
+ Addu(mask, mask, -1); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label is_smi;
+ Label load_result_from_cache;
+ JumpIfSmi(object, &is_smi);
+ CheckMap(object,
+ scratch1,
+ Heap::kHeapNumberMapRootIndex,
+ not_found,
+ DONT_DO_SMI_CHECK);
+
+ STATIC_ASSERT(8 == kDoubleSize);
+ Addu(scratch1,
+ object,
+ Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+ lw(scratch2, MemOperand(scratch1, kPointerSize));
+ lw(scratch1, MemOperand(scratch1, 0));
+ Xor(scratch1, scratch1, Operand(scratch2));
+ And(scratch1, scratch1, Operand(mask));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer-sized fields.
+ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
+ Addu(scratch1, number_string_cache, scratch1);
+
+ Register probe = mask;
+ lw(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
+ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
+ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
+ Branch(not_found);
+
+ bind(&is_smi);
+ Register scratch = scratch1;
+ sra(scratch, object, 1); // Shift away the tag.
+ And(scratch, mask, Operand(scratch));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer-sized fields.
+ sll(scratch, scratch, kPointerSizeLog2 + 1);
+ Addu(scratch, number_string_cache, scratch);
+
+ // Check if the entry is the smi we are looking for.
+ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ Branch(not_found, ne, object, Operand(probe));
+
+ // Get the result from the cache.
+ bind(&load_result_from_cache);
+ lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+
+ IncrementCounter(isolate()->counters()->number_to_string_native(),
+ 1,
+ scratch1,
+ scratch2);
+}
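As the comment above says (see Heap::GetNumberStringCache), a heap number is hashed by XOR-ing the two 32-bit halves of its double and masking with half the cache length, since each entry takes a number slot and a string slot. A host-side sketch of that index computation (the function name is illustrative, not a V8 API):

#include <cstdint>
#include <cstring>

uint32_t NumberStringCacheIndex(double value, uint32_t cache_length) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);  // Well-defined type pun.
  uint32_t hash = static_cast<uint32_t>(bits) ^
                  static_cast<uint32_t>(bits >> 32);
  return hash & (cache_length / 2 - 1);  // Two slots per cache entry.
}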
+
+
+void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
+ Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
- // Test that both first and second are sequential ASCII strings.
+ // Test that both first and second are sequential one-byte strings.
// Assume that they are non-smis.
lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
- JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
- scratch2,
- scratch1,
- scratch2,
- failure);
+ JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
+ scratch2, failure);
}
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure) {
+void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
// Check that neither is a smi.
STATIC_ASSERT(kSmiTag == 0);
And(scratch1, first, Operand(second));
JumpIfSmi(scratch1, failure);
- JumpIfNonSmisNotBothSequentialAsciiStrings(first,
- second,
- scratch1,
- scratch2,
- failure);
+ JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
+ scratch2, failure);
}
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
+ Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
- int kFlatAsciiStringMask =
+ const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- ASSERT(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed.
- andi(scratch1, first, kFlatAsciiStringMask);
- Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
- andi(scratch2, second, kFlatAsciiStringMask);
- Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
+ const int kFlatOneByteStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
+ DCHECK(kFlatOneByteStringTag <= 0xffff); // Ensure this fits 16-bit immed.
+ andi(scratch1, first, kFlatOneByteStringMask);
+ Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
+ andi(scratch2, second, kFlatOneByteStringMask);
+ Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
}
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
- Register scratch,
- Label* failure) {
- int kFlatAsciiStringMask =
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
+ Register scratch,
+ Label* failure) {
+ const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- And(scratch, type, Operand(kFlatAsciiStringMask));
- Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
+ const int kFlatOneByteStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
+ And(scratch, type, Operand(kFlatOneByteStringMask));
+ Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
}
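Both helpers reduce to one AND-and-compare per string: mask out everything except the string, encoding, and representation bits and require the sequential one-byte combination. A sketch using the constant values the DCHECKs in this file imply (kOneByteStringTag == 4, kIsNotStringMask == 0x80); the representation mask value is assumed here, and the real definitions live in V8's object headers:

#include <cstdint>

const uint32_t kIsNotStringMask = 0x80;
const uint32_t kStringEncodingMask = 0x04;
const uint32_t kStringRepresentationMask = 0x03;  // Assumed value.
const uint32_t kStringTag = 0x00;
const uint32_t kOneByteStringTag = 0x04;
const uint32_t kSeqStringTag = 0x00;

bool IsSequentialOneByteInstanceType(uint32_t instance_type) {
  const uint32_t mask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const uint32_t tag = kStringTag | kOneByteStringTag | kSeqStringTag;
  return (instance_type & mask) == tag;
}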
@@ -4899,6 +5400,42 @@
}
+void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ Register scratch,
+ uint32_t encoding_mask) {
+ Label is_object;
+ SmiTst(string, at);
+ Check(ne, kNonObject, at, Operand(zero_reg));
+
+ lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
+ lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
+
+ andi(at, at, kStringRepresentationMask | kStringEncodingMask);
+ li(scratch, Operand(encoding_mask));
+ Check(eq, kUnexpectedStringType, at, Operand(scratch));
+
+ // The index is assumed to be untagged coming in; tag it to compare with
+ // the string length without using a temp register. It is restored at the
+ // end of this function.
+ Label index_tag_ok, index_tag_bad;
+ TrySmiTag(index, scratch, &index_tag_bad);
+ Branch(&index_tag_ok);
+ bind(&index_tag_bad);
+ Abort(kIndexIsTooLarge);
+ bind(&index_tag_ok);
+
+ lw(at, FieldMemOperand(string, String::kLengthOffset));
+ Check(lt, kIndexIsTooLarge, index, Operand(at));
+
+ DCHECK(Smi::FromInt(0) == 0);
+ Check(ge, kIndexIsNegative, index, Operand(zero_reg));
+
+ SmiUntag(index, index);
+}
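The bounds check tags the incoming index (doubling it) so it compares directly against the string's Smi-encoded length, then untags it before returning. A sketch of the comparison, assuming 32-bit Smis; TrySmiTag in the real code additionally bails out if the doubling overflows:

#include <cstdint>

bool SeqStringIndexInBounds(int32_t untagged_index, int32_t smi_length) {
  int32_t smi_index =
      static_cast<int32_t>(static_cast<uint32_t>(untagged_index) << 1);
  return smi_index >= 0 && smi_index < smi_length;  // Both values doubled.
}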
+
+
void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
@@ -4916,7 +5453,7 @@
// and the original value of sp.
mov(scratch, sp);
Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
- ASSERT(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
And(sp, sp, Operand(-frame_alignment));
sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
@@ -4961,19 +5498,19 @@
void MacroAssembler::CallCFunctionHelper(Register function,
int num_reg_arguments,
int num_double_arguments) {
- ASSERT(has_frame());
+ DCHECK(has_frame());
// Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which
// provides more information.
// The argument slots are presumed to have been set up by
// PrepareCallCFunction. Per the MIPS ABI, the C function must be called
// via t9.
-#if defined(V8_HOST_ARCH_MIPS)
+#if V8_HOST_ARCH_MIPS
if (emit_debug_code()) {
- int frame_alignment = OS::ActivationFrameAlignment();
+ int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
- ASSERT(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
Label alignment_as_expected;
And(at, sp, Operand(frame_alignment_mask));
Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
@@ -4999,7 +5536,7 @@
int stack_passed_arguments = CalculateStackPassedWords(
num_reg_arguments, num_double_arguments);
- if (OS::ActivationFrameAlignment() > kPointerSize) {
+ if (base::OS::ActivationFrameAlignment() > kPointerSize) {
lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
Addu(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
@@ -5017,7 +5554,7 @@
// At this point scratch is a lui(at, ...) instruction.
if (emit_debug_code()) {
And(scratch, scratch, kOpcodeMask);
- Check(eq, "The instruction to patch should be a lui.",
+ Check(eq, kTheInstructionToPatchShouldBeALui,
scratch, Operand(LUI));
lw(scratch, MemOperand(li_location));
}
@@ -5029,7 +5566,7 @@
// scratch is now ori(at, ...).
if (emit_debug_code()) {
And(scratch, scratch, kOpcodeMask);
- Check(eq, "The instruction to patch should be an ori.",
+ Check(eq, kTheInstructionToPatchShouldBeAnOri,
scratch, Operand(ORI));
lw(scratch, MemOperand(li_location, kInstrSize));
}
@@ -5046,7 +5583,7 @@
lw(value, MemOperand(li_location));
if (emit_debug_code()) {
And(value, value, kOpcodeMask);
- Check(eq, "The instruction should be a lui.",
+ Check(eq, kTheInstructionShouldBeALui,
value, Operand(LUI));
lw(value, MemOperand(li_location));
}
@@ -5057,7 +5594,7 @@
lw(scratch, MemOperand(li_location, kInstrSize));
if (emit_debug_code()) {
And(scratch, scratch, kOpcodeMask);
- Check(eq, "The instruction should be an ori.",
+ Check(eq, kTheInstructionShouldBeAnOri,
scratch, Operand(ORI));
lw(scratch, MemOperand(li_location, kInstrSize));
}
@@ -5082,12 +5619,24 @@
}
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated) {
+ if (map->CanBeDeprecated()) {
+ li(scratch, Operand(map));
+ lw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
+ And(scratch, scratch, Operand(Map::Deprecated::kMask));
+ Branch(if_deprecated, ne, scratch, Operand(zero_reg));
+ }
+}
+
+
void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
Label* on_black) {
HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
}
@@ -5097,8 +5646,8 @@
Label* has_color,
int first_bit,
int second_bit) {
- ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
- ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
+ DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
+ DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
GetMarkBits(object, bitmap_scratch, mask_scratch);
@@ -5127,13 +5676,13 @@
void MacroAssembler::JumpIfDataObject(Register value,
Register scratch,
Label* not_data_object) {
- ASSERT(!AreAliased(value, scratch, t8, no_reg));
+ DCHECK(!AreAliased(value, scratch, t8, no_reg));
Label is_data_object;
lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
Branch(&is_data_object, eq, t8, Operand(scratch));
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
// If it's a string and it's not a cons string then it's an object containing
// no GC pointers.
lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
@@ -5146,7 +5695,7 @@
void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg) {
- ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
+ DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
@@ -5164,14 +5713,14 @@
Register mask_scratch,
Register load_scratch,
Label* value_is_white_and_not_data) {
- ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
+ DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
- ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
- ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+ DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
Label done;
@@ -5210,8 +5759,8 @@
}
// Check for strings.
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
// If it's a string and it's not a cons string then it's an object containing
// no GC pointers.
Register instance_type = load_scratch;
@@ -5223,8 +5772,8 @@
// Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
// External strings are the only ones with the kExternalStringTag bit
// set.
- ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
- ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
+ DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
And(t8, instance_type, Operand(kExternalStringTag));
{
Label skip;
@@ -5234,12 +5783,12 @@
bind(&skip);
}
- // Sequential string, either ASCII or UC16.
- // For ASCII (char-size of 1) we shift the smi tag away to get the length.
+ // Sequential string, either Latin1 or UC16.
+ // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
// For UC16 (char-size of 2) we just leave the smi tag in place, thereby
// getting the length multiplied by 2.
- ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
+ DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
lw(t9, FieldMemOperand(value, String::kLengthOffset));
And(t8, instance_type, Operand(kStringEncodingMask));
{
@@ -5269,59 +5818,67 @@
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
- lw(descriptors,
- FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
- Label not_smi;
- JumpIfNotSmi(descriptors, ¬_smi);
- LoadRoot(descriptors, Heap::kEmptyDescriptorArrayRootIndex);
- bind(¬_smi);
+ lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
+}
+
+
+void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
+ lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
+}
+
+
+void MacroAssembler::EnumLength(Register dst, Register map) {
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+ lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ And(dst, dst, Operand(Map::EnumLengthBits::kMask));
+ SmiTag(dst);
}
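NumberOfOwnDescriptors and EnumLength are both bitfield decodes of the word at Map::kBitField3Offset: mask, shift, and (for EnumLength, whose shift is zero) re-tag the result as a Smi. A generic sketch of the decode step, with the template parameters standing in for a BitField's shift and width:

#include <cstdint>

template <unsigned kShift, unsigned kSize>
uint32_t DecodeField(uint32_t bit_field) {
  const uint32_t kMask = ((1u << kSize) - 1) << kShift;
  return (bit_field & kMask) >> kShift;
}

// E.g. DecodeField<0, 11>(bit_field3) for an 11-bit field at shift 0
// (widths here are illustrative, not V8's actual layout).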
void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
- Label next;
- // Preload a couple of values used in the loop.
Register empty_fixed_array_value = t2;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
- Register empty_descriptor_array_value = t3;
- LoadRoot(empty_descriptor_array_value,
- Heap::kEmptyDescriptorArrayRootIndex);
- mov(a1, a0);
+ Label next, start;
+ mov(a2, a0);
+
+ // Check if the enum length field is properly initialized, indicating that
+ // there is an enum cache.
+ lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
+
+ EnumLength(a3, a1);
+ Branch(
+ call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
+
+ jmp(&start);
+
bind(&next);
-
- // Check that there are no elements. Register a1 contains the
- // current JS object we've reached through the prototype chain.
- lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
- Branch(call_runtime, ne, a2, Operand(empty_fixed_array_value));
-
- // Check that instance descriptors are not empty so that we can
- // check for an enum cache. Leave the map in a2 for the subsequent
- // prototype load.
- lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
- lw(a3, FieldMemOperand(a2, Map::kInstanceDescriptorsOrBitField3Offset));
- JumpIfSmi(a3, call_runtime);
-
- // Check that there is an enum cache in the non-empty instance
- // descriptors (a3). This is the case if the next enumeration
- // index field does not contain a smi.
- lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumerationIndexOffset));
- JumpIfSmi(a3, call_runtime);
+ lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
// For all objects but the receiver, check that the cache is empty.
- Label check_prototype;
- Branch(&check_prototype, eq, a1, Operand(a0));
- lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumCacheBridgeCacheOffset));
- Branch(call_runtime, ne, a3, Operand(empty_fixed_array_value));
+ EnumLength(a3, a1);
+ Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
- // Load the prototype from the map and loop if non-null.
- bind(&check_prototype);
- lw(a1, FieldMemOperand(a2, Map::kPrototypeOffset));
- Branch(&next, ne, a1, Operand(null_value));
+ bind(&start);
+
+ // Check that there are no elements. Register a2 contains the current JS
+ // object we've reached through the prototype chain.
+ Label no_elements;
+ lw(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
+ Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
+
+ // Second chance: the object may be using the empty slow element dictionary.
+ LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
+ Branch(call_runtime, ne, a2, Operand(at));
+
+ bind(&no_elements);
+ lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
+ Branch(&next, ne, a2, Operand(null_value));
}
void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
- ASSERT(!output_reg.is(input_reg));
+ DCHECK(!output_reg.is(input_reg));
Label done;
li(output_reg, Operand(255));
// Normal branch: nop in delay slot.
@@ -5357,42 +5914,134 @@
// In 0-255 range, round and truncate.
bind(&in_bounds);
- round_w_d(temp_double_reg, input_reg);
+ cvt_w_d(temp_double_reg, input_reg);
mfc1(result_reg, temp_double_reg);
bind(&done);
}
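The clamp saturates at both ends and converts in the in-range case; note that this hunk switches round_w_d to cvt_w_d, which truncates. A host-side sketch of the visible clamp-and-truncate logic only (the hunk starts mid-function, so the NaN path below is an assumption):

#include <cstdint>

uint8_t ClampDoubleToUint8(double value) {
  if (!(value > 0.0)) return 0;    // Non-positive (and NaN, here) clamp to 0.
  if (value >= 255.0) return 255;  // Saturate at the top.
  return static_cast<uint8_t>(value);  // Truncate, matching cvt_w_d.
}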
-bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
- if (r1.is(r2)) return true;
- if (r1.is(r3)) return true;
- if (r1.is(r4)) return true;
- if (r2.is(r3)) return true;
- if (r2.is(r4)) return true;
- if (r3.is(r4)) return true;
- return false;
+void MacroAssembler::TestJSArrayForAllocationMemento(
+ Register receiver_reg,
+ Register scratch_reg,
+ Label* no_memento_found,
+ Condition cond,
+ Label* allocation_memento_present) {
+ ExternalReference new_space_start =
+ ExternalReference::new_space_start(isolate());
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+ Addu(scratch_reg, receiver_reg,
+ Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
+ Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
+ li(at, Operand(new_space_allocation_top));
+ lw(at, MemOperand(at));
+ Branch(no_memento_found, gt, scratch_reg, Operand(at));
+ lw(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
+ if (allocation_memento_present) {
+ Branch(allocation_memento_present, cond, scratch_reg,
+ Operand(isolate()->factory()->allocation_memento_map()));
+ }
}
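The memento probe is a pure address-range test: compute the address just past where an AllocationMemento would sit behind the JSArray and require it to lie inside the used part of new space before dereferencing anything. A sketch of that test with illustrative parameter names:

#include <cstdint>

bool MementoCandidateInNewSpace(uintptr_t receiver, uintptr_t js_array_size,
                                uintptr_t memento_size,
                                uintptr_t new_space_start,
                                uintptr_t new_space_top) {
  uintptr_t candidate_end = receiver + js_array_size + memento_size;
  // Mirrors the lt/gt branches to no_memento_found above.
  return candidate_end >= new_space_start && candidate_end <= new_space_top;
}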
-CodePatcher::CodePatcher(byte* address, int instructions)
+Register GetRegisterThatIsNotOneOf(Register reg1,
+ Register reg2,
+ Register reg3,
+ Register reg4,
+ Register reg5,
+ Register reg6) {
+ RegList regs = 0;
+ if (reg1.is_valid()) regs |= reg1.bit();
+ if (reg2.is_valid()) regs |= reg2.bit();
+ if (reg3.is_valid()) regs |= reg3.bit();
+ if (reg4.is_valid()) regs |= reg4.bit();
+ if (reg5.is_valid()) regs |= reg5.bit();
+ if (reg6.is_valid()) regs |= reg6.bit();
+
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
+ Register candidate = Register::FromAllocationIndex(i);
+ if (regs & candidate.bit()) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+ return no_reg;
+}
+
+
+void MacroAssembler::JumpIfDictionaryInPrototypeChain(
+ Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* found) {
+ DCHECK(!scratch1.is(scratch0));
+ Factory* factory = isolate()->factory();
+ Register current = scratch0;
+ Label loop_again;
+
+ // Start the walk from the object itself; 'current' follows the chain.
+ Move(current, object);
+
+ // Loop based on the map going up the prototype chain.
+ bind(&loop_again);
+ lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
+ lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
+ DecodeField<Map::ElementsKindBits>(scratch1);
+ Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
+ lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
+ Branch(&loop_again, ne, current, Operand(factory->null_value()));
+}
+
+
+bool AreAliased(Register reg1,
+ Register reg2,
+ Register reg3,
+ Register reg4,
+ Register reg5,
+ Register reg6,
+ Register reg7,
+ Register reg8) {
+ int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
+ reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
+ reg7.is_valid() + reg8.is_valid();
+
+ RegList regs = 0;
+ if (reg1.is_valid()) regs |= reg1.bit();
+ if (reg2.is_valid()) regs |= reg2.bit();
+ if (reg3.is_valid()) regs |= reg3.bit();
+ if (reg4.is_valid()) regs |= reg4.bit();
+ if (reg5.is_valid()) regs |= reg5.bit();
+ if (reg6.is_valid()) regs |= reg6.bit();
+ if (reg7.is_valid()) regs |= reg7.bit();
+ if (reg8.is_valid()) regs |= reg8.bit();
+ int n_of_non_aliasing_regs = NumRegs(regs);
+
+ return n_of_valid_regs != n_of_non_aliasing_regs;
+}
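AreAliased works because a RegList bitmask collapses duplicates: if any two valid registers share a code, the popcount of the mask drops below the count of valid registers. A two-register sketch, using the GCC/Clang popcount builtin in place of NumRegs:

#include <cstdint>

// A register code of -1 stands in for an invalid (no_reg) register.
bool AreAliased2(int code1, int code2) {
  int n_valid = (code1 >= 0) + (code2 >= 0);
  uint32_t regs = 0;
  if (code1 >= 0) regs |= 1u << code1;
  if (code2 >= 0) regs |= 1u << code2;
  return n_valid != __builtin_popcount(regs);
}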
+
+
+CodePatcher::CodePatcher(byte* address,
+ int instructions,
+ FlushICache flush_cache)
: address_(address),
- instructions_(instructions),
size_(instructions * Assembler::kInstrSize),
- masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
+ masm_(NULL, address, size_ + Assembler::kGap),
+ flush_cache_(flush_cache) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+ DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
- CPU::FlushICache(address_, size_);
+ if (flush_cache_ == FLUSH) {
+ CpuFeatures::FlushICache(address_, size_);
+ }
// Check that the code was patched as expected.
- ASSERT(masm_.pc_ == address_ + size_);
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+ DCHECK(masm_.pc_ == address_ + size_);
+ DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
@@ -5408,13 +6057,13 @@
void CodePatcher::ChangeBranchCondition(Condition cond) {
Instr instr = Assembler::instr_at(masm_.pc_);
- ASSERT(Assembler::IsBranch(instr));
+ DCHECK(Assembler::IsBranch(instr));
uint32_t opcode = Assembler::GetOpcodeField(instr);
// Currently only the 'eq' and 'ne' cond values are supported and the simple
// branch instructions (with opcode being the branch type).
// There are some special cases (see Assembler::IsBranch()) so extending this
// would be tricky.
- ASSERT(opcode == BEQ ||
+ DCHECK(opcode == BEQ ||
opcode == BNE ||
opcode == BLEZ ||
opcode == BGTZ ||
@@ -5428,6 +6077,29 @@
}
+void MacroAssembler::TruncatingDiv(Register result,
+ Register dividend,
+ int32_t divisor) {
+ DCHECK(!dividend.is(result));
+ DCHECK(!dividend.is(at));
+ DCHECK(!result.is(at));
+ base::MagicNumbersForDivision<uint32_t> mag =
+ base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
+ li(at, Operand(mag.multiplier));
+ Mulh(result, dividend, Operand(at));
+ bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
+ if (divisor > 0 && neg) {
+ Addu(result, result, Operand(dividend));
+ }
+ if (divisor < 0 && !neg && mag.multiplier > 0) {
+ Subu(result, result, Operand(dividend));
+ }
+ if (mag.shift > 0) sra(result, result, mag.shift);
+ srl(at, dividend, 31);
+ Addu(result, result, Operand(at));
+}
+
+
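TruncatingDiv emits the classic multiply-by-magic-number division (Granlund and Montgomery; see also Hacker's Delight, ch. 10): take the high half of a 32x32 signed multiply, apply the sign-dependent corrections shown above, shift, and add the dividend's sign bit so the quotient rounds toward zero. A host-side sketch hard-coded for divisor 7 (magic constant 0x92492493, shift 2), which exercises the divisor > 0, negative-multiplier correction path:

#include <cassert>
#include <cstdint>
#include <initializer_list>

int32_t TruncatingDiv7(int32_t dividend) {
  const int32_t kMagic = static_cast<int32_t>(0x92492493);  // Negative.
  const int kShift = 2;
  // Mulh: the high 32 bits of the signed 64-bit product.
  int32_t result = static_cast<int32_t>(
      (static_cast<int64_t>(dividend) * kMagic) >> 32);
  result += dividend;  // divisor > 0 and magic < 0: add the dividend back.
  result >>= kShift;   // sra.
  result += static_cast<int32_t>(static_cast<uint32_t>(dividend) >> 31);
  return result;
}

int main() {
  for (int32_t n : {-100, -20, -7, -1, 0, 1, 6, 7, 20, 100}) {
    assert(TruncatingDiv7(n) == n / 7);  // C++ '/' truncates toward zero.
  }
  return 0;
}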
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS