Upgrade to 3.29

Update V8 to 3.29.88.17 and update makefiles to support building on
all the relevant platforms.

Bug: 17370214
Change-Id: Ia3407c157fd8d72a93e23d8318ccaf6ecf77fa4e
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index f7db250..5033303 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -1,41 +1,22 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "assembler-x64.h"
-#include "macro-assembler-x64.h"
-#include "serialize.h"
-#include "debug.h"
-#include "heap.h"
+#include "src/base/bits.h"
+#include "src/base/division-by-constant.h"
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/cpu-profiler.h"
+#include "src/debug.h"
+#include "src/heap/heap.h"
+#include "src/isolate-inl.h"
+#include "src/serialize.h"
+#include "src/x64/assembler-x64.h"
+#include "src/x64/macro-assembler-x64.h"
namespace v8 {
namespace internal {
@@ -43,7 +24,6 @@
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true),
has_frame_(false),
root_array_available_(true) {
if (isolate() != NULL) {
@@ -53,34 +33,51 @@
}
-static intptr_t RootRegisterDelta(ExternalReference other, Isolate* isolate) {
+static const int64_t kInvalidRootRegisterDelta = -1;
+
+
+int64_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
+ if (predictable_code_size() &&
+ (other.address() < reinterpret_cast<Address>(isolate()) ||
+ other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
+ return kInvalidRootRegisterDelta;
+ }
Address roots_register_value = kRootRegisterBias +
- reinterpret_cast<Address>(isolate->heap()->roots_array_start());
- intptr_t delta = other.address() - roots_register_value;
+ reinterpret_cast<Address>(isolate()->heap()->roots_array_start());
+
+ int64_t delta = kInvalidRootRegisterDelta; // Bogus initialization.
+ if (kPointerSize == kInt64Size) {
+ delta = other.address() - roots_register_value;
+ } else {
+ // For x32, zero extend the address to 64-bit and calculate the delta.
+ uint64_t o = static_cast<uint32_t>(
+ reinterpret_cast<intptr_t>(other.address()));
+ uint64_t r = static_cast<uint32_t>(
+ reinterpret_cast<intptr_t>(roots_register_value));
+ delta = o - r;
+ }
return delta;
}
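
The delta computed here is what enables root-register-relative addressing in the
helpers below: when an external reference lies within a signed 32-bit displacement
of kRootRegister, it can be addressed without materializing a 64-bit immediate.
A minimal standalone sketch of the arithmetic (all addresses are made up for
illustration; only the bias value mirrors the x64 configuration):

#include <cstdint>
#include <limits>

int main() {
  const int64_t kRootRegisterBias = 128;  // as on x64 at this version
  // Hypothetical addresses, chosen only to illustrate the computation.
  uint64_t roots_array_start = 0x00007f8000001000;
  uint64_t root_register = roots_array_start + kRootRegisterBias;
  uint64_t external_ref = 0x00007f8000042000;
  int64_t delta = static_cast<int64_t>(external_ref - root_register);
  // When the delta fits in int32, ExternalOperand/Load/Store below can use
  // Operand(kRootRegister, delta) instead of loading a 64-bit address first.
  bool fits = delta >= std::numeric_limits<int32_t>::min() &&
              delta <= std::numeric_limits<int32_t>::max();
  return fits ? 0 : 1;
}
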
Operand MacroAssembler::ExternalOperand(ExternalReference target,
Register scratch) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(target, isolate());
- if (is_int32(delta)) {
- Serializer::TooLateToEnableNow();
+ if (root_array_available_ && !serializer_enabled()) {
+ int64_t delta = RootRegisterDelta(target);
+ if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
return Operand(kRootRegister, static_cast<int32_t>(delta));
}
}
- movq(scratch, target);
+ Move(scratch, target);
return Operand(scratch, 0);
}
void MacroAssembler::Load(Register destination, ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(source, isolate());
- if (is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
+ if (root_array_available_ && !serializer_enabled()) {
+ int64_t delta = RootRegisterDelta(source);
+ if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
+ movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
return;
}
}
@@ -88,18 +85,17 @@
if (destination.is(rax)) {
load_rax(source);
} else {
- movq(kScratchRegister, source);
- movq(destination, Operand(kScratchRegister, 0));
+ Move(kScratchRegister, source);
+ movp(destination, Operand(kScratchRegister, 0));
}
}
void MacroAssembler::Store(ExternalReference destination, Register source) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(destination, isolate());
- if (is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
+ if (root_array_available_ && !serializer_enabled()) {
+ int64_t delta = RootRegisterDelta(destination);
+ if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
+ movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
return;
}
}
@@ -107,36 +103,34 @@
if (source.is(rax)) {
store_rax(destination);
} else {
- movq(kScratchRegister, destination);
- movq(Operand(kScratchRegister, 0), source);
+ Move(kScratchRegister, destination);
+ movp(Operand(kScratchRegister, 0), source);
}
}
void MacroAssembler::LoadAddress(Register destination,
ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(source, isolate());
- if (is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
+ if (root_array_available_ && !serializer_enabled()) {
+ int64_t delta = RootRegisterDelta(source);
+ if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
+ leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
return;
}
}
// Safe code.
- movq(destination, source);
+ Move(destination, source);
}
int MacroAssembler::LoadAddressSize(ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled()) {
+ if (root_array_available_ && !serializer_enabled()) {
// This calculation depends on the internals of LoadAddress.
// Its correctness is ensured by the asserts in the Call
// instruction below.
- intptr_t delta = RootRegisterDelta(source, isolate());
- if (is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- // Operand is lea(scratch, Operand(kRootRegister, delta));
+ int64_t delta = RootRegisterDelta(source);
+ if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
+ // Operand is leap(scratch, Operand(kRootRegister, delta));
// Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
int size = 4;
if (!is_int8(static_cast<int32_t>(delta))) {
@@ -145,14 +139,28 @@
return size;
}
}
- // Size of movq(destination, src);
- return 10;
+ // Size of movp(destination, src);
+ return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
+}
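
The size arithmetic falls straight out of x64 instruction encoding (kRootRegister
is r13, which always requires a displacement); a byte-count sketch of the three
cases the function distinguishes:

// Encoding arithmetic behind LoadAddressSize:
//   leap r, [r13 + disp8]  = REX.W + 8D + ModRM + disp8  = 1+1+1+1 = 4 bytes
//   leap r, [r13 + disp32] = REX.W + 8D + ModRM + disp32 = 1+1+1+4 = 7 bytes
//   movq r, imm64          = REX.W + B8+r + imm64        = 1+1+8   = 10 bytes
//   (10 is kMoveAddressIntoScratchRegisterInstructionLength on x64.)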
+
+
+void MacroAssembler::PushAddress(ExternalReference source) {
+ int64_t address = reinterpret_cast<int64_t>(source.address());
+ if (is_int32(address) && !serializer_enabled()) {
+ if (emit_debug_code()) {
+ Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
+ }
+ Push(Immediate(static_cast<int32_t>(address)));
+ return;
+ }
+ LoadAddress(kScratchRegister, source);
+ Push(kScratchRegister);
}
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
- ASSERT(root_array_available_);
- movq(destination, Operand(kRootRegister,
+ DCHECK(root_array_available_);
+ movp(destination, Operand(kRootRegister,
(index << kPointerSizeLog2) - kRootRegisterBias));
}
@@ -160,8 +168,8 @@
void MacroAssembler::LoadRootIndexed(Register destination,
Register variable_offset,
int fixed_offset) {
- ASSERT(root_array_available_);
- movq(destination,
+ DCHECK(root_array_available_);
+ movp(destination,
Operand(kRootRegister,
variable_offset, times_pointer_size,
(fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
@@ -169,31 +177,31 @@
void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
- ASSERT(root_array_available_);
- movq(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
+ DCHECK(root_array_available_);
+ movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
source);
}
void MacroAssembler::PushRoot(Heap::RootListIndex index) {
- ASSERT(root_array_available_);
- push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
+ DCHECK(root_array_available_);
+ Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
}
void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
- ASSERT(root_array_available_);
- cmpq(with, Operand(kRootRegister,
+ DCHECK(root_array_available_);
+ cmpp(with, Operand(kRootRegister,
(index << kPointerSizeLog2) - kRootRegisterBias));
}
void MacroAssembler::CompareRoot(const Operand& with,
Heap::RootListIndex index) {
- ASSERT(root_array_available_);
- ASSERT(!with.AddressUsesRegister(kScratchRegister));
+ DCHECK(root_array_available_);
+ DCHECK(!with.AddressUsesRegister(kScratchRegister));
LoadRoot(kScratchRegister, index);
- cmpq(with, kScratchRegister);
+ cmpp(with, kScratchRegister);
}
@@ -202,7 +210,7 @@
Register scratch,
SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Label ok;
JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
int3();
@@ -211,31 +219,30 @@
// Load store buffer top.
LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
// Store pointer to buffer.
- movq(Operand(scratch, 0), addr);
+ movp(Operand(scratch, 0), addr);
// Increment buffer top.
- addq(scratch, Immediate(kPointerSize));
+ addp(scratch, Immediate(kPointerSize));
// Write back new top of buffer.
StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
// Call stub on end of buffer.
Label done;
// Check for end of buffer.
- testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+ testp(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
if (and_then == kReturnAtEnd) {
Label buffer_overflowed;
j(not_equal, &buffer_overflowed, Label::kNear);
ret(0);
bind(&buffer_overflowed);
} else {
- ASSERT(and_then == kFallThroughAtEnd);
+ DCHECK(and_then == kFallThroughAtEnd);
j(equal, &done, Label::kNear);
}
- StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(save_fp);
+ StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
CallStub(&store_buffer_overflow);
if (and_then == kReturnAtEnd) {
ret(0);
} else {
- ASSERT(and_then == kFallThroughAtEnd);
+ DCHECK(and_then == kFallThroughAtEnd);
bind(&done);
}
}
@@ -246,32 +253,36 @@
Condition cc,
Label* branch,
Label::Distance distance) {
- if (Serializer::enabled()) {
+ if (serializer_enabled()) {
// Can't do arithmetic on external references if they might get serialized.
// The mask isn't really an address. We load it as an external reference in
// case the size of the new space is different between the snapshot maker
// and the running system.
if (scratch.is(object)) {
- movq(kScratchRegister, ExternalReference::new_space_mask(isolate()));
- and_(scratch, kScratchRegister);
+ Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
+ andp(scratch, kScratchRegister);
} else {
- movq(scratch, ExternalReference::new_space_mask(isolate()));
- and_(scratch, object);
+ Move(scratch, ExternalReference::new_space_mask(isolate()));
+ andp(scratch, object);
}
- movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
- cmpq(scratch, kScratchRegister);
+ Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
+ cmpp(scratch, kScratchRegister);
j(cc, branch, distance);
} else {
- ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
+ DCHECK(kPointerSize == kInt64Size
+ ? is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask()))
+ : kPointerSize == kInt32Size);
intptr_t new_space_start =
- reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
- movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
+ reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
+ Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
+ Assembler::RelocInfoNone());
if (scratch.is(object)) {
- addq(scratch, kScratchRegister);
+ addp(scratch, kScratchRegister);
} else {
- lea(scratch, Operand(object, kScratchRegister, times_1, 0));
+ leap(scratch, Operand(object, kScratchRegister, times_1, 0));
}
- and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
+ andp(scratch,
+ Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
j(cc, branch, distance);
}
}
@@ -284,12 +295,8 @@
Register dst,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are rsi.
- ASSERT(!value.is(rsi) && !dst.is(rsi));
-
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
@@ -301,9 +308,9 @@
// Although the object register is tagged, the offset is relative to the start
// of the object, so the offset must be a multiple of kPointerSize.
- ASSERT(IsAligned(offset, kPointerSize));
+ DCHECK(IsAligned(offset, kPointerSize));
- lea(dst, FieldOperand(object, offset));
+ leap(dst, FieldOperand(object, offset));
if (emit_debug_code()) {
Label ok;
testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
@@ -312,26 +319,28 @@
bind(&ok);
}
- RecordWrite(
- object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+ RecordWrite(object, dst, value, save_fp, remembered_set_action,
+ OMIT_SMI_CHECK, pointers_to_here_check_for_value);
bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ Move(value, kZapValue, Assembler::RelocInfoNone());
+ Move(dst, kZapValue, Assembler::RelocInfoNone());
}
}
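
The sequence above is the usual generational/incremental write-barrier fast path:
filter out stores that cannot create an interesting pointer before falling back to
the stub. A hedged sketch of the same filtering in plain C++ (types and field
names are illustrative, not V8's):

#include <cstdint>

struct PageFlags {
  bool pointers_to_here_interesting;    // e.g. set for new-space pages
  bool pointers_from_here_interesting;  // e.g. set during incremental marking
};

// Mirrors the checks RecordWriteField emits: smi stores, and stores whose
// value page or object page is not flagged, skip the RecordWriteStub.
bool NeedsWriteBarrier(uint64_t value_word, const PageFlags& value_page,
                       const PageFlags& object_page) {
  if ((value_word & 1) == 0) return false;  // smi: not a heap pointer
  if (!value_page.pointers_to_here_interesting) return false;
  if (!object_page.pointers_from_here_interesting) return false;
  return true;
}

int main() {
  PageFlags new_space{true, false}, old_space{false, true};
  if (NeedsWriteBarrier(0x2A00000000, new_space, old_space)) return 1;  // smi
  return NeedsWriteBarrier(0x1001, new_space, old_space) ? 0 : 1;  // old->new
}
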
-void MacroAssembler::RecordWriteArray(Register object,
- Register value,
- Register index,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+void MacroAssembler::RecordWriteArray(
+ Register object,
+ Register value,
+ Register index,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
@@ -343,49 +352,115 @@
// Array access: calculate the destination address. Index is not a smi.
Register dst = index;
- lea(dst, Operand(object, index, times_pointer_size,
+ leap(dst, Operand(object, index, times_pointer_size,
FixedArray::kHeaderSize - kHeapObjectTag));
- RecordWrite(
- object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+ RecordWrite(object, dst, value, save_fp, remembered_set_action,
+ OMIT_SMI_CHECK, pointers_to_here_check_for_value);
bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ Move(value, kZapValue, Assembler::RelocInfoNone());
+ Move(index, kZapValue, Assembler::RelocInfoNone());
}
}
-void MacroAssembler::RecordWrite(Register object,
- Register address,
- Register value,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are rsi.
- ASSERT(!value.is(rsi) && !address.is(rsi));
+void MacroAssembler::RecordWriteForMap(Register object,
+ Register map,
+ Register dst,
+ SaveFPRegsMode fp_mode) {
+ DCHECK(!object.is(kScratchRegister));
+ DCHECK(!object.is(map));
+ DCHECK(!object.is(dst));
+ DCHECK(!map.is(dst));
+ AssertNotSmi(object);
- ASSERT(!object.is(value));
- ASSERT(!object.is(address));
- ASSERT(!value.is(address));
if (emit_debug_code()) {
- AbortIfSmi(object);
+ Label ok;
+ if (map.is(kScratchRegister)) pushq(map);
+ CompareMap(map, isolate()->factory()->meta_map());
+ if (map.is(kScratchRegister)) popq(map);
+ j(equal, &ok, Label::kNear);
+ int3();
+ bind(&ok);
}
+ if (!FLAG_incremental_marking) {
+ return;
+ }
+
+ if (emit_debug_code()) {
+ Label ok;
+ if (map.is(kScratchRegister)) pushq(map);
+ cmpp(map, FieldOperand(object, HeapObject::kMapOffset));
+ if (map.is(kScratchRegister)) popq(map);
+ j(equal, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
+
+ // Compute the address.
+ leap(dst, FieldOperand(object, HeapObject::kMapOffset));
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
+ Label done;
+
+  // A single check of the map's page's interesting flag suffices, since it is
+  // only set during incremental collection, and then the from object's page's
+  // interesting flag is guaranteed to be set as well. This optimization
+  // relies on the fact that maps can never be in new space.
+ CheckPageFlag(map,
+ map, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ zero,
+ &done,
+ Label::kNear);
+
+ RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
+ fp_mode);
+ CallStub(&stub);
+
+ bind(&done);
+
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
+ // Clobber clobbered registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ Move(dst, kZapValue, Assembler::RelocInfoNone());
+ Move(map, kZapValue, Assembler::RelocInfoNone());
+ }
+}
+
+
+void MacroAssembler::RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
+ DCHECK(!object.is(value));
+ DCHECK(!object.is(address));
+ DCHECK(!value.is(address));
+ AssertNotSmi(object);
+
if (remembered_set_action == OMIT_REMEMBERED_SET &&
!FLAG_incremental_marking) {
return;
}
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Label ok;
- cmpq(value, Operand(address, 0));
+ cmpp(value, Operand(address, 0));
j(equal, &ok, Label::kNear);
int3();
bind(&ok);
@@ -400,12 +475,14 @@
JumpIfSmi(value, &done);
}
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- zero,
- &done,
- Label::kNear);
+ if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ zero,
+ &done,
+ Label::kNear);
+ }
CheckPageFlag(object,
value, // Used as scratch.
@@ -414,22 +491,27 @@
&done,
Label::kNear);
- RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+ RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
+ fp_mode);
CallStub(&stub);
bind(&done);
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ Move(address, kZapValue, Assembler::RelocInfoNone());
+ Move(value, kZapValue, Assembler::RelocInfoNone());
}
}
-void MacroAssembler::Assert(Condition cc, const char* msg) {
- if (emit_debug_code()) Check(cc, msg);
+void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
+ if (emit_debug_code()) Check(cc, reason);
}
@@ -445,28 +527,28 @@
CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
Heap::kFixedCOWArrayMapRootIndex);
j(equal, &ok, Label::kNear);
- Abort("JSObject with fast elements map has slow elements");
+ Abort(kJSObjectWithFastElementsMapHasSlowElements);
bind(&ok);
}
}
-void MacroAssembler::Check(Condition cc, const char* msg) {
+void MacroAssembler::Check(Condition cc, BailoutReason reason) {
Label L;
j(cc, &L, Label::kNear);
- Abort(msg);
+ Abort(reason);
// Control will not return here.
bind(&L);
}
void MacroAssembler::CheckStackAlignment() {
- int frame_alignment = OS::ActivationFrameAlignment();
+ int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
- ASSERT(IsPowerOf2(frame_alignment));
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
Label alignment_as_expected;
- testq(rsp, Immediate(frame_alignment_mask));
+ testp(rsp, Immediate(frame_alignment_mask));
j(zero, &alignment_as_expected, Label::kNear);
// Abort if stack is not aligned.
int3();
@@ -487,72 +569,56 @@
}
-void MacroAssembler::Abort(const char* msg) {
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- // Note: p0 might not be a valid Smi _value_, but it has a valid Smi tag.
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
+void MacroAssembler::Abort(BailoutReason reason) {
#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
}
+
+ if (FLAG_trap_on_abort) {
+ int3();
+ return;
+ }
#endif
- push(rax);
- movq(kScratchRegister, p0, RelocInfo::NONE);
- push(kScratchRegister);
- movq(kScratchRegister,
- reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
- RelocInfo::NONE);
- push(kScratchRegister);
+
+ Move(kScratchRegister, Smi::FromInt(static_cast<int>(reason)),
+ Assembler::RelocInfoNone());
+ Push(kScratchRegister);
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
} else {
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
}
// Control will not return here.
int3();
}
-void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
- ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
+void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
+ DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
void MacroAssembler::StubReturn(int argc) {
- ASSERT(argc >= 1 && generating_stub());
+ DCHECK(argc >= 1 && generating_stub());
ret((argc - 1) * kPointerSize);
}
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
-}
-
-
-void MacroAssembler::IllegalOperation(int num_arguments) {
- if (num_arguments > 0) {
- addq(rsp, Immediate(num_arguments * kPointerSize));
- }
- LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+ return has_frame_ || !stub->SometimesSetsUpAFrame();
}
@@ -560,44 +626,22 @@
// The assert checks that the constants for the maximum number of digits
// for an array index cached in the hash field and the number of bits
// reserved for it do not conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. Even if we subsequently go to
- // the slow case, converting the key to a smi is always valid.
- // key: string key
- // hash: key's hash field, including its array index value.
- and_(hash, Immediate(String::kArrayIndexValueMask));
- shr(hash, Immediate(String::kHashShift));
- // Here we actually clobber the key which will be used if calling into
- // runtime later. However as the new key is the numeric value of a string key
- // there is no difference in using either key.
- Integer32ToSmi(index, hash);
-}
-
-
-void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- Set(rax, function->nargs);
- LoadAddress(rbx, ExternalReference(function, isolate()));
- CEntryStub ces(1, kSaveFPRegs);
- CallStub(&ces);
+ if (!hash.is(index)) {
+ movl(index, hash);
+ }
+ DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
}
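
The assert is simple arithmetic: the largest index with kMaxCachedArrayIndexLength
digits is below 10^kMaxCachedArrayIndexLength, which must fit in the
kArrayIndexValueBits-wide hash-field range. With the values these constants had
around this version (7 digits, 24 value bits; substituted here for illustration):

// Worked check of the assert with contemporary constant values plugged in:
// 10^7 = 10,000,000 < 2^24 = 16,777,216.
static_assert(10 * 1000 * 1000 < (1 << 24),
              "max cached array index fits in the hash-field value bits");
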
void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
+ int num_arguments,
+ SaveFPRegsMode save_doubles) {
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments matches the
// expectation.
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- return;
- }
+ CHECK(f->nargs < 0 || f->nargs == num_arguments);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
@@ -605,7 +649,7 @@
// smarter.
Set(rax, num_arguments);
LoadAddress(rbx, ExternalReference(f, isolate()));
- CEntryStub ces(f->result_size);
+ CEntryStub ces(isolate(), f->result_size, save_doubles);
CallStub(&ces);
}
@@ -615,7 +659,7 @@
Set(rax, num_arguments);
LoadAddress(rbx, ext);
- CEntryStub stub(1);
+ CEntryStub stub(isolate(), 1);
CallStub(&stub);
}
@@ -624,8 +668,8 @@
int num_arguments,
int result_size) {
// ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] : argument num_arguments - 1
+ // -- rsp[0] : return address
+ // -- rsp[8] : argument num_arguments - 1
// ...
// -- rsp[8 * num_arguments] : argument 0 (receiver)
// -----------------------------------
@@ -651,108 +695,168 @@
static int Offset(ExternalReference ref0, ExternalReference ref1) {
int64_t offset = (ref0.address() - ref1.address());
// Check that fits into int.
- ASSERT(static_cast<int>(offset) == offset);
+ DCHECK(static_cast<int>(offset) == offset);
return static_cast<int>(offset);
}
void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
-#ifdef _WIN64
- // We need to prepare a slot for result handle on stack and put
- // a pointer to it into 1st arg register.
- EnterApiExitFrame(arg_stack_space + 1);
-
- // rcx must be used to pass the pointer to the return value slot.
- lea(rcx, StackSpaceOperand(arg_stack_space));
-#else
EnterApiExitFrame(arg_stack_space);
-#endif
}
-void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
- int stack_space) {
- Label empty_result;
+void MacroAssembler::CallApiFunctionAndReturn(
+ Register function_address,
+ ExternalReference thunk_ref,
+ Register thunk_last_arg,
+ int stack_space,
+ Operand return_value_operand,
+ Operand* context_restore_operand) {
Label prologue;
Label promote_scheduled_exception;
+ Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
Label write_back;
Factory* factory = isolate()->factory();
ExternalReference next_address =
- ExternalReference::handle_scope_next_address();
+ ExternalReference::handle_scope_next_address(isolate());
const int kNextOffset = 0;
const int kLimitOffset = Offset(
- ExternalReference::handle_scope_limit_address(),
+ ExternalReference::handle_scope_limit_address(isolate()),
next_address);
const int kLevelOffset = Offset(
- ExternalReference::handle_scope_level_address(),
+ ExternalReference::handle_scope_level_address(isolate()),
next_address);
ExternalReference scheduled_exception_address =
ExternalReference::scheduled_exception_address(isolate());
+ DCHECK(rdx.is(function_address) || r8.is(function_address));
// Allocate HandleScope in callee-save registers.
Register prev_next_address_reg = r14;
Register prev_limit_reg = rbx;
Register base_reg = r15;
- movq(base_reg, next_address);
- movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
- movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
+ Move(base_reg, next_address);
+ movp(prev_next_address_reg, Operand(base_reg, kNextOffset));
+ movp(prev_limit_reg, Operand(base_reg, kLimitOffset));
addl(Operand(base_reg, kLevelOffset), Immediate(1));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(1);
+ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
+ CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
+
+
+ Label profiler_disabled;
+ Label end_profiler_check;
+ Move(rax, ExternalReference::is_profiling_address(isolate()));
+ cmpb(Operand(rax, 0), Immediate(0));
+ j(zero, &profiler_disabled);
+
+ // Third parameter is the address of the actual getter function.
+ Move(thunk_last_arg, function_address);
+ Move(rax, thunk_ref);
+ jmp(&end_profiler_check);
+
+ bind(&profiler_disabled);
// Call the api function directly.
- movq(rax, reinterpret_cast<int64_t>(function_address),
- RelocInfo::RUNTIME_ENTRY);
+ Move(rax, function_address);
+
+ bind(&end_profiler_check);
+
+ // Call the api function!
call(rax);
-#ifdef _WIN64
- // rax keeps a pointer to v8::Handle, unpack it.
- movq(rax, Operand(rax, 0));
-#endif
- // Check if the result handle holds 0.
- testq(rax, rax);
- j(zero, &empty_result);
- // It was non-zero. Dereference to get the result value.
- movq(rax, Operand(rax, 0));
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(1);
+ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
+ CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
+ PopSafepointRegisters();
+ }
+
+ // Load the value from ReturnValue
+ movp(rax, return_value_operand);
bind(&prologue);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
subl(Operand(base_reg, kLevelOffset), Immediate(1));
- movq(Operand(base_reg, kNextOffset), prev_next_address_reg);
- cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
+ movp(Operand(base_reg, kNextOffset), prev_next_address_reg);
+ cmpp(prev_limit_reg, Operand(base_reg, kLimitOffset));
j(not_equal, &delete_allocated_handles);
bind(&leave_exit_frame);
// Check if the function scheduled an exception.
- movq(rsi, scheduled_exception_address);
+ Move(rsi, scheduled_exception_address);
Cmp(Operand(rsi, 0), factory->the_hole_value());
j(not_equal, &promote_scheduled_exception);
+ bind(&exception_handled);
- LeaveApiExitFrame();
+#if ENABLE_EXTRA_CHECKS
+ // Check if the function returned a valid JavaScript value.
+ Label ok;
+ Register return_value = rax;
+ Register map = rcx;
+
+ JumpIfSmi(return_value, &ok, Label::kNear);
+ movp(map, FieldOperand(return_value, HeapObject::kMapOffset));
+
+ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
+ j(below, &ok, Label::kNear);
+
+ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ j(above_equal, &ok, Label::kNear);
+
+ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ j(equal, &ok, Label::kNear);
+
+ CompareRoot(return_value, Heap::kUndefinedValueRootIndex);
+ j(equal, &ok, Label::kNear);
+
+ CompareRoot(return_value, Heap::kTrueValueRootIndex);
+ j(equal, &ok, Label::kNear);
+
+ CompareRoot(return_value, Heap::kFalseValueRootIndex);
+ j(equal, &ok, Label::kNear);
+
+ CompareRoot(return_value, Heap::kNullValueRootIndex);
+ j(equal, &ok, Label::kNear);
+
+ Abort(kAPICallReturnedInvalidObject);
+
+ bind(&ok);
+#endif
+
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ movp(rsi, *context_restore_operand);
+ }
+ LeaveApiExitFrame(!restore_context);
ret(stack_space * kPointerSize);
bind(&promote_scheduled_exception);
- TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
-
- bind(&empty_result);
- // It was zero; the result is undefined.
- Move(rax, factory->undefined_value());
- jmp(&prologue);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallRuntime(Runtime::kPromoteScheduledException, 0);
+ }
+ jmp(&exception_handled);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
- movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
- movq(prev_limit_reg, rax);
-#ifdef _WIN64
- LoadAddress(rcx, ExternalReference::isolate_address());
-#else
- LoadAddress(rdi, ExternalReference::isolate_address());
-#endif
+ movp(Operand(base_reg, kLimitOffset), prev_limit_reg);
+ movp(prev_limit_reg, rax);
+ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
LoadAddress(rax,
ExternalReference::delete_handle_scope_extensions(isolate()));
call(rax);
- movq(rax, prev_limit_reg);
+ movp(rax, prev_limit_reg);
jmp(&leave_exit_frame);
}
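
The prologue/epilogue around the call mirror HandleScope's open/close protocol;
in outline (a simplified sketch, not V8's actual types):

struct HandleScopeData {
  void** next;   // next free handle slot     (kNextOffset above)
  void** limit;  // end of the current block  (kLimitOffset)
  int level;     // nesting depth             (kLevelOffset)
};

// Open: save next/limit into callee-save registers, ++level (the addl on
// kLevelOffset). Close: --level, restore next; if limit moved while the
// callback ran, extra handle blocks were allocated and must be freed --
// that is the delete_allocated_handles slow path, which calls
// ExternalReference::delete_handle_scope_extensions.
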
@@ -761,7 +865,7 @@
int result_size) {
// Set the entry point and jump to the C entry runtime stub.
LoadAddress(rbx, ext);
- CEntryStub ces(result_size);
+ CEntryStub ces(isolate(), result_size);
jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}
@@ -770,32 +874,32 @@
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a builtin without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
// Rely on the assertion to check that the number of provided
// arguments matches the expected number of arguments. Fake a
// parameter count to avoid emitting code to do the check.
ParameterCount expected(0);
GetBuiltinEntry(rdx, id);
- InvokeCode(rdx, expected, expected, flag, call_wrapper, CALL_AS_METHOD);
+ InvokeCode(rdx, expected, expected, flag, call_wrapper);
}
void MacroAssembler::GetBuiltinFunction(Register target,
Builtins::JavaScript id) {
// Load the builtins object into target register.
- movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- movq(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
- movq(target, FieldOperand(target,
+ movp(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ movp(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
+ movp(target, FieldOperand(target,
JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}
void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- ASSERT(!target.is(rdi));
+ DCHECK(!target.is(rdi));
// Load the JavaScript builtin function from the builtins object.
GetBuiltinFunction(rdi, id);
- movq(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ movp(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
}
@@ -821,14 +925,13 @@
for (int i = 0; i < kNumberOfSavedRegs; i++) {
Register reg = saved_regs[i];
if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
- push(reg);
+ pushq(reg);
}
}
// R12 to r15 are callee save on all platforms.
if (fp_mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
- subq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+ subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
+ for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(Operand(rsp, i * kDoubleSize), reg);
}
@@ -841,22 +944,70 @@
Register exclusion2,
Register exclusion3) {
if (fp_mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+ for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(reg, Operand(rsp, i * kDoubleSize));
}
- addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+ addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
}
for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
Register reg = saved_regs[i];
if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
- pop(reg);
+ popq(reg);
}
}
}
+void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
+ xorps(dst, dst);
+ cvtlsi2sd(dst, src);
+}
+
+
+void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
+ xorps(dst, dst);
+ cvtlsi2sd(dst, src);
+}
+
+
+void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
+ DCHECK(!r.IsDouble());
+ if (r.IsInteger8()) {
+ movsxbq(dst, src);
+ } else if (r.IsUInteger8()) {
+ movzxbl(dst, src);
+ } else if (r.IsInteger16()) {
+ movsxwq(dst, src);
+ } else if (r.IsUInteger16()) {
+ movzxwl(dst, src);
+ } else if (r.IsInteger32()) {
+ movl(dst, src);
+ } else {
+ movp(dst, src);
+ }
+}
+
+
+void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
+ DCHECK(!r.IsDouble());
+ if (r.IsInteger8() || r.IsUInteger8()) {
+ movb(dst, src);
+ } else if (r.IsInteger16() || r.IsUInteger16()) {
+ movw(dst, src);
+ } else if (r.IsInteger32()) {
+ movl(dst, src);
+ } else {
+ if (r.IsHeapObject()) {
+ AssertNotSmi(src);
+ } else if (r.IsSmi()) {
+ AssertSmi(src);
+ }
+ movp(dst, src);
+ }
+}
+
+
void MacroAssembler::Set(Register dst, int64_t x) {
if (x == 0) {
xorl(dst, dst);
@@ -865,22 +1016,73 @@
} else if (is_int32(x)) {
movq(dst, Immediate(static_cast<int32_t>(x)));
} else {
- movq(dst, x, RelocInfo::NONE);
+ movq(dst, x);
}
}
-void MacroAssembler::Set(const Operand& dst, int64_t x) {
- if (is_int32(x)) {
- movq(dst, Immediate(static_cast<int32_t>(x)));
+
+void MacroAssembler::Set(const Operand& dst, intptr_t x) {
+ if (kPointerSize == kInt64Size) {
+ if (is_int32(x)) {
+ movp(dst, Immediate(static_cast<int32_t>(x)));
+ } else {
+ Set(kScratchRegister, x);
+ movp(dst, kScratchRegister);
+ }
} else {
- Set(kScratchRegister, x);
- movq(dst, kScratchRegister);
+ movp(dst, Immediate(static_cast<int32_t>(x)));
}
}
+
// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.
+bool MacroAssembler::IsUnsafeInt(const int32_t x) {
+ static const int kMaxBits = 17;
+ return !is_intn(x, kMaxBits);
+}
+
+
+void MacroAssembler::SafeMove(Register dst, Smi* src) {
+ DCHECK(!dst.is(kScratchRegister));
+ if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
+ if (SmiValuesAre32Bits()) {
+ // JIT cookie can be converted to Smi.
+ Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
+ Move(kScratchRegister, Smi::FromInt(jit_cookie()));
+ xorp(dst, kScratchRegister);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
+ movp(dst, Immediate(value ^ jit_cookie()));
+ xorp(dst, Immediate(jit_cookie()));
+ }
+ } else {
+ Move(dst, src);
+ }
+}
+
+
+void MacroAssembler::SafePush(Smi* src) {
+ if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
+ if (SmiValuesAre32Bits()) {
+ // JIT cookie can be converted to Smi.
+ Push(Smi::FromInt(src->value() ^ jit_cookie()));
+ Move(kScratchRegister, Smi::FromInt(jit_cookie()));
+ xorp(Operand(rsp, 0), kScratchRegister);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
+ Push(Immediate(value ^ jit_cookie()));
+ xorp(Operand(rsp, 0), Immediate(jit_cookie()));
+ }
+ } else {
+ Push(src);
+ }
+}
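
SafeMove/SafePush exist so that attacker-influenced 32-bit constants never appear
verbatim in executable memory (a JIT-spraying mitigation): the immediate is
emitted XORed with a per-process cookie, and the emitted xorp reconstructs it at
run time. A self-contained sketch of the identity (cookie value hypothetical):

#include <cassert>
#include <cstdint>

int main() {
  const int32_t jit_cookie = 0x5eed5eed;  // hypothetical per-process secret
  int32_t unsafe = 0x41414141;            // attacker-influenced constant
  int32_t emitted = unsafe ^ jit_cookie;  // what is baked into the code
  int32_t at_runtime = emitted ^ jit_cookie;  // what the emitted xorp computes
  assert(at_runtime == unsafe);  // value reconstructed, never stored verbatim
  return 0;
}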
+
+
Register MacroAssembler::GetSmiConstant(Smi* source) {
int value = source->value();
if (value == 0) {
@@ -894,20 +1096,13 @@
return kScratchRegister;
}
+
void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
if (emit_debug_code()) {
- movq(dst,
- reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
- RelocInfo::NONE);
- cmpq(dst, kSmiConstantRegister);
- if (allow_stub_calls()) {
- Assert(equal, "Uninitialized kSmiConstantRegister");
- } else {
- Label ok;
- j(equal, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
+ Move(dst, Smi::FromInt(kSmiConstantRegisterValue),
+ Assembler::RelocInfoNone());
+ cmpp(dst, kSmiConstantRegister);
+ Assert(equal, kUninitializedKSmiConstantRegister);
}
int value = source->value();
if (value == 0) {
@@ -919,37 +1114,41 @@
switch (uvalue) {
case 9:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
+ leap(dst,
+ Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
break;
case 8:
xorl(dst, dst);
- lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
+ leap(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
break;
case 4:
xorl(dst, dst);
- lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
+ leap(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
break;
case 5:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
+ leap(dst,
+ Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
break;
case 3:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
+ leap(dst,
+ Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
break;
case 2:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
+ leap(dst,
+ Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
break;
case 1:
- movq(dst, kSmiConstantRegister);
+ movp(dst, kSmiConstantRegister);
break;
case 0:
UNREACHABLE();
return;
default:
- movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE);
+ Move(dst, source, Assembler::RelocInfoNone());
return;
}
if (negative) {
- neg(dst);
+ negp(dst);
}
}
@@ -959,7 +1158,7 @@
if (!dst.is(src)) {
movl(dst, src);
}
- shl(dst, Immediate(kSmiShift));
+ shlp(dst, Immediate(kSmiShift));
}
@@ -968,15 +1167,18 @@
testb(dst, Immediate(0x01));
Label ok;
j(zero, &ok, Label::kNear);
- if (allow_stub_calls()) {
- Abort("Integer32ToSmiField writing to non-smi location");
- } else {
- int3();
- }
+ Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
bind(&ok);
}
- ASSERT(kSmiShift % kBitsPerByte == 0);
- movl(Operand(dst, kSmiShift / kBitsPerByte), src);
+
+ if (SmiValuesAre32Bits()) {
+ DCHECK(kSmiShift % kBitsPerByte == 0);
+ movl(Operand(dst, kSmiShift / kBitsPerByte), src);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ Integer32ToSmi(kScratchRegister, src);
+ movp(dst, kScratchRegister);
+ }
}
@@ -988,126 +1190,150 @@
} else {
leal(dst, Operand(src, constant));
}
- shl(dst, Immediate(kSmiShift));
+ shlp(dst, Immediate(kSmiShift));
}
void MacroAssembler::SmiToInteger32(Register dst, Register src) {
STATIC_ASSERT(kSmiTag == 0);
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
- shr(dst, Immediate(kSmiShift));
+
+ if (SmiValuesAre32Bits()) {
+ shrp(dst, Immediate(kSmiShift));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ sarl(dst, Immediate(kSmiShift));
+ }
}
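
Both smi layouts appear in this function: with 32-bit smi values (ordinary x64)
the payload occupies the upper 32 bits, so untagging is a 64-bit shift; with
31-bit smi values (x32) the payload sits in bits 1..31 and is untagged with an
arithmetic shift of the low word. A minimal sketch of the two layouts (written
with multiplication/division to sidestep signed-shift pitfalls):

#include <cassert>
#include <cstdint>

int main() {
  int32_t v = -7;
  // 32-bit smi values: kSmiShift == 32, payload in the upper half.
  int64_t tagged64 = static_cast<int64_t>(v) * (int64_t{1} << 32);
  assert((tagged64 & 1) == 0);                        // smi tag bit is 0
  assert(static_cast<int32_t>(tagged64 >> 32) == v);  // sarp by kSmiShift
  // 31-bit smi values: kSmiShift == 1, payload in bits 1..31.
  int32_t tagged31 = v * 2;   // equivalent to v << 1
  assert((tagged31 & 1) == 0);
  assert(tagged31 / 2 == v);  // sarl by 1 recovers the value
  return 0;
}
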
void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
- movl(dst, Operand(src, kSmiShift / kBitsPerByte));
+ if (SmiValuesAre32Bits()) {
+ movl(dst, Operand(src, kSmiShift / kBitsPerByte));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ movl(dst, src);
+ sarl(dst, Immediate(kSmiShift));
+ }
}
void MacroAssembler::SmiToInteger64(Register dst, Register src) {
STATIC_ASSERT(kSmiTag == 0);
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
- sar(dst, Immediate(kSmiShift));
+ sarp(dst, Immediate(kSmiShift));
+ if (kPointerSize == kInt32Size) {
+ // Sign extend to 64-bit.
+ movsxlq(dst, dst);
+ }
}
void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
- movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
+ if (SmiValuesAre32Bits()) {
+ movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ movp(dst, src);
+ SmiToInteger64(dst, dst);
+ }
}
void MacroAssembler::SmiTest(Register src) {
- testq(src, src);
+ AssertSmi(src);
+ testp(src, src);
}
void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
- if (emit_debug_code()) {
- AbortIfNotSmi(smi1);
- AbortIfNotSmi(smi2);
- }
- cmpq(smi1, smi2);
+ AssertSmi(smi1);
+ AssertSmi(smi2);
+ cmpp(smi1, smi2);
}
void MacroAssembler::SmiCompare(Register dst, Smi* src) {
- if (emit_debug_code()) {
- AbortIfNotSmi(dst);
- }
+ AssertSmi(dst);
Cmp(dst, src);
}
void MacroAssembler::Cmp(Register dst, Smi* src) {
- ASSERT(!dst.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
if (src->value() == 0) {
- testq(dst, dst);
+ testp(dst, dst);
} else {
Register constant_reg = GetSmiConstant(src);
- cmpq(dst, constant_reg);
+ cmpp(dst, constant_reg);
}
}
void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
- if (emit_debug_code()) {
- AbortIfNotSmi(dst);
- AbortIfNotSmi(src);
- }
- cmpq(dst, src);
+ AssertSmi(dst);
+ AssertSmi(src);
+ cmpp(dst, src);
}
void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
- if (emit_debug_code()) {
- AbortIfNotSmi(dst);
- AbortIfNotSmi(src);
- }
- cmpq(dst, src);
+ AssertSmi(dst);
+ AssertSmi(src);
+ cmpp(dst, src);
}
void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
- if (emit_debug_code()) {
- AbortIfNotSmi(dst);
+ AssertSmi(dst);
+ if (SmiValuesAre32Bits()) {
+ cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ cmpl(dst, Immediate(src));
}
- cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
}
void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
// The Operand cannot use the smi register.
Register smi_reg = GetSmiConstant(src);
- ASSERT(!dst.AddressUsesRegister(smi_reg));
- cmpq(dst, smi_reg);
+ DCHECK(!dst.AddressUsesRegister(smi_reg));
+ cmpp(dst, smi_reg);
}
void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
- cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
+ if (SmiValuesAre32Bits()) {
+ cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ SmiToInteger32(kScratchRegister, dst);
+ cmpl(kScratchRegister, src);
+ }
}
void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
Register src,
int power) {
- ASSERT(power >= 0);
- ASSERT(power < 64);
+ DCHECK(power >= 0);
+ DCHECK(power < 64);
if (power == 0) {
SmiToInteger64(dst, src);
return;
}
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
if (power < kSmiShift) {
- sar(dst, Immediate(kSmiShift - power));
+ sarp(dst, Immediate(kSmiShift - power));
} else if (power > kSmiShift) {
- shl(dst, Immediate(power - kSmiShift));
+ shlp(dst, Immediate(power - kSmiShift));
}
}
@@ -1115,9 +1341,9 @@
void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
Register src,
int power) {
- ASSERT((0 <= power) && (power < 32));
+ DCHECK((0 <= power) && (power < 32));
if (dst.is(src)) {
- shr(dst, Immediate(power + kSmiShift));
+ shrp(dst, Immediate(power + kSmiShift));
} else {
UNIMPLEMENTED(); // Not used.
}
@@ -1128,15 +1354,15 @@
Label* on_not_smis,
Label::Distance near_jump) {
if (dst.is(src1) || dst.is(src2)) {
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- movq(kScratchRegister, src1);
- or_(kScratchRegister, src2);
+ DCHECK(!src1.is(kScratchRegister));
+ DCHECK(!src2.is(kScratchRegister));
+ movp(kScratchRegister, src1);
+ orp(kScratchRegister, src2);
JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
- movq(dst, kScratchRegister);
+ movp(dst, kScratchRegister);
} else {
- movq(dst, src1);
- or_(dst, src2);
+ movp(dst, src1);
+ orp(dst, src2);
JumpIfNotSmi(dst, on_not_smis, near_jump);
}
}
@@ -1159,8 +1385,8 @@
Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
STATIC_ASSERT(kSmiTag == 0);
// Test that both bits of the mask 0x8000000000000001 are zero.
- movq(kScratchRegister, src);
- rol(kScratchRegister, Immediate(1));
+ movp(kScratchRegister, src);
+ rolp(kScratchRegister, Immediate(1));
testb(kScratchRegister, Immediate(3));
return zero;
}
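
The rotate trick works because rol-by-1 moves the sign bit (bit 63) into bit 0
and the smi tag (bit 0) into bit 1, so "non-negative smi" becomes "low two bits
are zero". A standalone sketch, using 32-bit smi tagging for the test values:

#include <cassert>
#include <cstdint>

static bool NonNegativeSmi(uint64_t word) {
  uint64_t rotated = (word << 1) | (word >> 63);  // rolp(word, 1)
  return (rotated & 3) == 0;                      // testb(.., Immediate(3))
}

int main() {
  assert(NonNegativeSmi(uint64_t{42} << 32));      // positive smi
  assert(!NonNegativeSmi(0xFFFFFFFF00000000ull));  // smi -1: sign bit set
  assert(!NonNegativeSmi(0x1001));                 // heap-object-tagged word
  return 0;
}
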
@@ -1171,8 +1397,15 @@
return CheckSmi(first);
}
STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
- leal(kScratchRegister, Operand(first, second, times_1, 0));
- testb(kScratchRegister, Immediate(0x03));
+ if (SmiValuesAre32Bits()) {
+ leal(kScratchRegister, Operand(first, second, times_1, 0));
+ testb(kScratchRegister, Immediate(0x03));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ movl(kScratchRegister, first);
+ orl(kScratchRegister, second);
+ testb(kScratchRegister, Immediate(kSmiTagMask));
+ }
return zero;
}
@@ -1182,9 +1415,9 @@
if (first.is(second)) {
return CheckNonNegativeSmi(first);
}
- movq(kScratchRegister, first);
- or_(kScratchRegister, second);
- rol(kScratchRegister, Immediate(1));
+ movp(kScratchRegister, first);
+ orp(kScratchRegister, second);
+ rolp(kScratchRegister, Immediate(1));
testl(kScratchRegister, Immediate(3));
return zero;
}
@@ -1210,24 +1443,36 @@
Condition MacroAssembler::CheckIsMinSmi(Register src) {
- ASSERT(!src.is(kScratchRegister));
+ DCHECK(!src.is(kScratchRegister));
// If we overflow by subtracting one, it's the minimal smi value.
- cmpq(src, kSmiConstantRegister);
+ cmpp(src, kSmiConstantRegister);
return overflow;
}
Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
- // A 32-bit integer value can always be converted to a smi.
- return always;
+ if (SmiValuesAre32Bits()) {
+ // A 32-bit integer value can always be converted to a smi.
+ return always;
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ cmpl(src, Immediate(0xc0000000));
+ return positive;
+ }
}
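
The 31-bit branch is a classic range check folded into one comparison: cmpl
computes src - 0xc0000000, which modulo 2^32 equals src + 2^30, and that sum has
its sign bit clear exactly when src is in [-2^30, 2^30). A standalone sketch:

#include <cassert>
#include <cstdint>

static bool FitsSmi31(int32_t src) {
  uint32_t diff = static_cast<uint32_t>(src) - 0xc0000000u;  // cmpl
  return (diff & 0x80000000u) == 0;  // condition "positive": sign flag clear
}

int main() {
  assert(FitsSmi31(0));
  assert(FitsSmi31((1 << 30) - 1));   // largest 31-bit smi value
  assert(FitsSmi31(-(1 << 30)));      // smallest 31-bit smi value
  assert(!FitsSmi31(1 << 30));
  assert(!FitsSmi31(-(1 << 30) - 1));
  return 0;
}
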
Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
- // An unsigned 32-bit integer value is valid as long as the high bit
- // is not set.
- testl(src, src);
- return positive;
+ if (SmiValuesAre32Bits()) {
+ // An unsigned 32-bit integer value is valid as long as the high bit
+ // is not set.
+ testl(src, src);
+ return positive;
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ testl(src, Immediate(0xc0000000));
+ return zero;
+ }
}
@@ -1252,6 +1497,14 @@
}
+void MacroAssembler::JumpIfValidSmiValue(Register src,
+ Label* on_valid,
+ Label::Distance near_jump) {
+ Condition is_valid = CheckInteger32ValidSmiValue(src);
+ j(is_valid, on_valid, near_jump);
+}
+
+
void MacroAssembler::JumpIfNotValidSmiValue(Register src,
Label* on_invalid,
Label::Distance near_jump) {
@@ -1260,6 +1513,14 @@
}
+void MacroAssembler::JumpIfUIntValidSmiValue(Register src,
+ Label* on_valid,
+ Label::Distance near_jump) {
+ Condition is_valid = CheckUInteger32ValidSmiValue(src);
+ j(is_valid, on_valid, near_jump);
+}
+
+
void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
Label* on_invalid,
Label::Distance near_jump) {
@@ -1319,71 +1580,49 @@
}
-void MacroAssembler::SmiTryAddConstant(Register dst,
- Register src,
- Smi* constant,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- // Does not assume that src is a smi.
- ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src.is(kScratchRegister));
-
- JumpIfNotSmi(src, on_not_smi_result, near_jump);
- Register tmp = (dst.is(src) ? kScratchRegister : dst);
- LoadSmiConstant(tmp, constant);
- addq(tmp, src);
- j(overflow, on_not_smi_result, near_jump);
- if (dst.is(src)) {
- movq(dst, tmp);
- }
-}
-
-
void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
if (constant->value() == 0) {
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
return;
} else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
switch (constant->value()) {
case 1:
- addq(dst, kSmiConstantRegister);
+ addp(dst, kSmiConstantRegister);
return;
case 2:
- lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
return;
case 4:
- lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
return;
case 8:
- lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
return;
default:
Register constant_reg = GetSmiConstant(constant);
- addq(dst, constant_reg);
+ addp(dst, constant_reg);
return;
}
} else {
switch (constant->value()) {
case 1:
- lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_1, 0));
return;
case 2:
- lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
return;
case 4:
- lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
return;
case 8:
- lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
return;
default:
LoadSmiConstant(dst, constant);
- addq(dst, src);
+ addp(dst, src);
return;
}
}
@@ -1392,7 +1631,13 @@
void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
if (constant->value() != 0) {
- addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
+ if (SmiValuesAre32Bits()) {
+ addl(Operand(dst, kSmiShift / kBitsPerByte),
+ Immediate(constant->value()));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ addp(dst, Immediate(constant));
+ }
}
}
@@ -1400,23 +1645,41 @@
void MacroAssembler::SmiAddConstant(Register dst,
Register src,
Smi* constant,
- Label* on_not_smi_result,
+ SmiOperationExecutionMode mode,
+ Label* bailout_label,
Label::Distance near_jump) {
if (constant->value() == 0) {
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
} else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
-
+ DCHECK(!dst.is(kScratchRegister));
LoadSmiConstant(kScratchRegister, constant);
- addq(kScratchRegister, src);
- j(overflow, on_not_smi_result, near_jump);
- movq(dst, kScratchRegister);
+ addp(dst, kScratchRegister);
+ if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
+ j(no_overflow, bailout_label, near_jump);
+ DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
+ subp(dst, kScratchRegister);
+ } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
+ if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
+ Label done;
+ j(no_overflow, &done, Label::kNear);
+ subp(dst, kScratchRegister);
+ jmp(bailout_label, near_jump);
+ bind(&done);
+ } else {
+        // Bail out on overflow without preserving src.
+ j(overflow, bailout_label, near_jump);
+ }
+ } else {
+ CHECK(mode.IsEmpty());
+ }
} else {
+ DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
+ DCHECK(mode.Contains(BAILOUT_ON_OVERFLOW));
LoadSmiConstant(dst, constant);
- addq(dst, src);
- j(overflow, on_not_smi_result, near_jump);
+ addp(dst, src);
+ j(overflow, bailout_label, near_jump);
}
}
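
The PRESERVE_SOURCE_REGISTER handling relies on addition being invertible even
when it overflows: if addp sets the overflow flag, subtracting the constant back
restores the original value bit-for-bit (two's-complement wraparound), so the
bailout path still sees an intact input. Sketched in C++:

#include <cassert>
#include <cstdint>

int main() {
  // Two's-complement add/sub round-trips across overflow, which is what
  // lets SmiAddConstant undo the addp before jumping to bailout_label.
  uint64_t src = 0x7fffffffffffffffull;  // would overflow as a signed add
  uint64_t constant = 1;
  uint64_t dst = src + constant;  // overflowed result
  dst -= constant;                // restore: bit-identical to the input
  assert(dst == src);
  return 0;
}
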
@@ -1424,22 +1687,22 @@
void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
if (constant->value() == 0) {
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
} else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- subq(dst, constant_reg);
+ subp(dst, constant_reg);
} else {
if (constant->value() == Smi::kMinValue) {
LoadSmiConstant(dst, constant);
// Adding and subtracting the min-value gives the same result, it only
// differs on the overflow bit, which we don't check here.
- addq(dst, src);
+ addp(dst, src);
} else {
// Subtract by adding the negation.
LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
- addq(dst, src);
+ addp(dst, src);
}
}
}
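
The Smi::kMinValue special case in this non-checking variant rests on a
two's-complement identity: negating the minimum value yields the value itself,
so adding the constant and subtracting its negation produce identical bits and
differ only in the overflow flag, which this variant deliberately ignores.
For example:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t m = static_cast<uint64_t>(INT64_MIN);  // stand-in for the tagged
                                                  // Smi::kMinValue pattern
  // -min == min in two's complement, so x - min and x + min agree bit-wise.
  uint64_t x = 12345;
  assert(x - m == x + m);
  return 0;
}
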
@@ -1448,43 +1711,49 @@
void MacroAssembler::SmiSubConstant(Register dst,
Register src,
Smi* constant,
- Label* on_not_smi_result,
+ SmiOperationExecutionMode mode,
+ Label* bailout_label,
Label::Distance near_jump) {
if (constant->value() == 0) {
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
} else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- if (constant->value() == Smi::kMinValue) {
- // Subtracting min-value from any non-negative value will overflow.
- // We test the non-negativeness before doing the subtraction.
- testq(src, src);
- j(not_sign, on_not_smi_result, near_jump);
- LoadSmiConstant(kScratchRegister, constant);
- subq(dst, kScratchRegister);
+ DCHECK(!dst.is(kScratchRegister));
+ LoadSmiConstant(kScratchRegister, constant);
+ subp(dst, kScratchRegister);
+ if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
+ j(no_overflow, bailout_label, near_jump);
+ DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
+ addp(dst, kScratchRegister);
+ } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
+ if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
+ Label done;
+ j(no_overflow, &done, Label::kNear);
+ addp(dst, kScratchRegister);
+ jmp(bailout_label, near_jump);
+ bind(&done);
+ } else {
+ // Bail out on overflow without preserving src.
+ j(overflow, bailout_label, near_jump);
+ }
} else {
- // Subtract by adding the negation.
- LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
- addq(kScratchRegister, dst);
- j(overflow, on_not_smi_result, near_jump);
- movq(dst, kScratchRegister);
+ CHECK(mode.IsEmpty());
}
} else {
+ DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
+ DCHECK(mode.Contains(BAILOUT_ON_OVERFLOW));
if (constant->value() == Smi::kMinValue) {
- // Subtracting min-value from any non-negative value will overflow.
- // We test the non-negativeness before doing the subtraction.
- testq(src, src);
- j(not_sign, on_not_smi_result, near_jump);
- LoadSmiConstant(dst, constant);
- // Adding and subtracting the min-value gives the same result, it only
- // differs on the overflow bit, which we don't check here.
- addq(dst, src);
+ DCHECK(!dst.is(kScratchRegister));
+ movp(dst, src);
+ LoadSmiConstant(kScratchRegister, constant);
+ subp(dst, kScratchRegister);
+ j(overflow, bailout_label, near_jump);
} else {
// Subtract by adding the negation.
LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
- addq(dst, src);
- j(overflow, on_not_smi_result, near_jump);
+ addp(dst, src);
+ j(overflow, bailout_label, near_jump);
}
}
}
@@ -1495,40 +1764,54 @@
Label* on_smi_result,
Label::Distance near_jump) {
if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- movq(kScratchRegister, src);
- neg(dst); // Low 32 bits are retained as zero by negation.
+ DCHECK(!dst.is(kScratchRegister));
+ movp(kScratchRegister, src);
+ negp(dst); // Low 32 bits are retained as zero by negation.
// Test if result is zero or Smi::kMinValue.
- cmpq(dst, kScratchRegister);
+ cmpp(dst, kScratchRegister);
j(not_equal, on_smi_result, near_jump);
- movq(src, kScratchRegister);
+ movp(src, kScratchRegister);
} else {
- movq(dst, src);
- neg(dst);
- cmpq(dst, src);
+ movp(dst, src);
+ negp(dst);
+ cmpp(dst, src);
// If the result is zero or Smi::kMinValue, negation failed to create a smi.
j(not_equal, on_smi_result, near_jump);
}
}
+template<class T>
+static void SmiAddHelper(MacroAssembler* masm,
+ Register dst,
+ Register src1,
+ T src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ if (dst.is(src1)) {
+ Label done;
+ masm->addp(dst, src2);
+ masm->j(no_overflow, &done, Label::kNear);
+ // Restore src1.
+ masm->subp(dst, src2);
+ masm->jmp(on_not_smi_result, near_jump);
+ masm->bind(&done);
+ } else {
+ masm->movp(dst, src1);
+ masm->addp(dst, src2);
+ masm->j(overflow, on_not_smi_result, near_jump);
+ }
+}
+
+
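// For reference, the restore-on-overflow trick used by SmiAddHelper in
// portable C++: a minimal sketch, not part of this file; the helper name is
// hypothetical and the GCC/Clang __builtin_*_overflow intrinsics stand in
// for the CPU's overflow flag.
//
//   #include <cstdint>
//   static bool SmiAddRestoreOnOverflow(int64_t* dst, int64_t src2) {
//     if (__builtin_add_overflow(*dst, src2, dst)) {
//       // Undo the wrapped addition so the caller sees src1 intact, then
//       // let it branch to its on_not_smi_result label.
//       (void)__builtin_sub_overflow(*dst, src2, dst);
//       return false;
//     }
//     return true;
//   }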
void MacroAssembler::SmiAdd(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result,
Label::Distance near_jump) {
- ASSERT_NOT_NULL(on_not_smi_result);
- ASSERT(!dst.is(src2));
- if (dst.is(src1)) {
- movq(kScratchRegister, src1);
- addq(kScratchRegister, src2);
- j(overflow, on_not_smi_result, near_jump);
- movq(dst, kScratchRegister);
- } else {
- movq(dst, src1);
- addq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- }
+ DCHECK_NOT_NULL(on_not_smi_result);
+ DCHECK(!dst.is(src2));
+ SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
@@ -1537,18 +1820,9 @@
const Operand& src2,
Label* on_not_smi_result,
Label::Distance near_jump) {
- ASSERT_NOT_NULL(on_not_smi_result);
- if (dst.is(src1)) {
- movq(kScratchRegister, src1);
- addq(kScratchRegister, src2);
- j(overflow, on_not_smi_result, near_jump);
- movq(dst, kScratchRegister);
- } else {
- ASSERT(!src2.AddressUsesRegister(dst));
- movq(dst, src1);
- addq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- }
+ DCHECK_NOT_NULL(on_not_smi_result);
+ DCHECK(!src2.AddressUsesRegister(dst));
+ SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
@@ -1559,14 +1833,37 @@
// overflowing is impossible.
if (!dst.is(src1)) {
if (emit_debug_code()) {
- movq(kScratchRegister, src1);
- addq(kScratchRegister, src2);
- Check(no_overflow, "Smi addition overflow");
+ movp(kScratchRegister, src1);
+ addp(kScratchRegister, src2);
+ Check(no_overflow, kSmiAdditionOverflow);
}
- lea(dst, Operand(src1, src2, times_1, 0));
+ leap(dst, Operand(src1, src2, times_1, 0));
} else {
- addq(dst, src2);
- Assert(no_overflow, "Smi addition overflow");
+ addp(dst, src2);
+ Assert(no_overflow, kSmiAdditionOverflow);
+ }
+}
+
+
+template<class T>
+static void SmiSubHelper(MacroAssembler* masm,
+ Register dst,
+ Register src1,
+ T src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ if (dst.is(src1)) {
+ Label done;
+ masm->subp(dst, src2);
+ masm->j(no_overflow, &done, Label::kNear);
+ // Restore src1.
+ masm->addp(dst, src2);
+ masm->jmp(on_not_smi_result, near_jump);
+ masm->bind(&done);
+ } else {
+ masm->movp(dst, src1);
+ masm->subp(dst, src2);
+ masm->j(overflow, on_not_smi_result, near_jump);
}
}
@@ -1576,29 +1873,9 @@
Register src2,
Label* on_not_smi_result,
Label::Distance near_jump) {
- ASSERT_NOT_NULL(on_not_smi_result);
- ASSERT(!dst.is(src2));
- if (dst.is(src1)) {
- cmpq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- subq(dst, src2);
- } else {
- movq(dst, src1);
- subq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- }
-}
-
-
-void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
- // No overflow checking. Use only when it's known that
- // overflowing is impossible (e.g., subtracting two positive smis).
- ASSERT(!dst.is(src2));
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- subq(dst, src2);
- Assert(no_overflow, "Smi subtraction overflow");
+ DCHECK_NOT_NULL(on_not_smi_result);
+ DCHECK(!dst.is(src2));
+ SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
@@ -1607,30 +1884,37 @@
const Operand& src2,
Label* on_not_smi_result,
Label::Distance near_jump) {
- ASSERT_NOT_NULL(on_not_smi_result);
- if (dst.is(src1)) {
- movq(kScratchRegister, src2);
- cmpq(src1, kScratchRegister);
- j(overflow, on_not_smi_result, near_jump);
- subq(src1, kScratchRegister);
- } else {
- movq(dst, src1);
- subq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
+ DCHECK_NOT_NULL(on_not_smi_result);
+ DCHECK(!src2.AddressUsesRegister(dst));
+ SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
+}
+
+
+template<class T>
+static void SmiSubNoOverflowHelper(MacroAssembler* masm,
+ Register dst,
+ Register src1,
+ T src2) {
+ // No overflow checking. Use only when it's known that
+ // overflowing is impossible (e.g., subtracting two positive smis).
+ if (!dst.is(src1)) {
+ masm->movp(dst, src1);
}
+ masm->subp(dst, src2);
+ masm->Assert(no_overflow, kSmiSubtractionOverflow);
+}
+
+
+void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
+ DCHECK(!dst.is(src2));
+ SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);
}
void MacroAssembler::SmiSub(Register dst,
Register src1,
const Operand& src2) {
- // No overflow checking. Use only when it's known that
- // overflowing is impossible (e.g., subtracting two positive smis).
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- subq(dst, src2);
- Assert(no_overflow, "Smi subtraction overflow");
+ SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2);
}
@@ -1639,31 +1923,31 @@
Register src2,
Label* on_not_smi_result,
Label::Distance near_jump) {
- ASSERT(!dst.is(src2));
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
+ DCHECK(!dst.is(src2));
+ DCHECK(!dst.is(kScratchRegister));
+ DCHECK(!src1.is(kScratchRegister));
+ DCHECK(!src2.is(kScratchRegister));
if (dst.is(src1)) {
Label failure, zero_correct_result;
- movq(kScratchRegister, src1); // Create backup for later testing.
+ movp(kScratchRegister, src1); // Create backup for later testing.
SmiToInteger64(dst, src1);
- imul(dst, src2);
+ imulp(dst, src2);
j(overflow, &failure, Label::kNear);
// Check for negative zero result. If product is zero, and one
// argument is negative, go to slow case.
Label correct_result;
- testq(dst, dst);
+ testp(dst, dst);
j(not_zero, &correct_result, Label::kNear);
- movq(dst, kScratchRegister);
- xor_(dst, src2);
+ movp(dst, kScratchRegister);
+ xorp(dst, src2);
// Result was positive zero.
j(positive, &zero_correct_result, Label::kNear);
bind(&failure); // Reused failure exit, restores src1.
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
jmp(on_not_smi_result, near_jump);
bind(&zero_correct_result);
@@ -1672,17 +1956,17 @@
bind(&correct_result);
} else {
SmiToInteger64(dst, src1);
- imul(dst, src2);
+ imulp(dst, src2);
j(overflow, on_not_smi_result, near_jump);
// Check for negative zero result. If product is zero, and one
// argument is negative, go to slow case.
Label correct_result;
- testq(dst, dst);
+ testp(dst, dst);
j(not_zero, &correct_result, Label::kNear);
// One of src1 and src2 is zero; check whether the other is
// negative.
- movq(kScratchRegister, src1);
- xor_(kScratchRegister, src2);
+ movp(kScratchRegister, src1);
+ xorp(kScratchRegister, src2);
j(negative, on_not_smi_result, near_jump);
bind(&correct_result);
}
@@ -1694,19 +1978,19 @@
Register src2,
Label* on_not_smi_result,
Label::Distance near_jump) {
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src2.is(rax));
- ASSERT(!src2.is(rdx));
- ASSERT(!src1.is(rdx));
+ DCHECK(!src1.is(kScratchRegister));
+ DCHECK(!src2.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
+ DCHECK(!src2.is(rax));
+ DCHECK(!src2.is(rdx));
+ DCHECK(!src1.is(rdx));
// Check for 0 divisor (result is +/-Infinity).
- testq(src2, src2);
+ testp(src2, src2);
j(zero, on_not_smi_result, near_jump);
if (src1.is(rax)) {
- movq(kScratchRegister, src1);
+ movp(kScratchRegister, src1);
}
SmiToInteger32(rax, src1);
// We need to rule out dividing Smi::kMinValue by -1, since that would
@@ -1717,12 +2001,12 @@
// We overshoot a little and go to slow case if we divide min-value
// by any negative value, not just -1.
Label safe_div;
- testl(rax, Immediate(0x7fffffff));
+ testl(rax, Immediate(~Smi::kMinValue));
j(not_zero, &safe_div, Label::kNear);
- testq(src2, src2);
+ testp(src2, src2);
if (src1.is(rax)) {
j(positive, &safe_div, Label::kNear);
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
jmp(on_not_smi_result, near_jump);
} else {
j(negative, on_not_smi_result, near_jump);
@@ -1739,14 +2023,14 @@
if (src1.is(rax)) {
Label smi_result;
j(zero, &smi_result, Label::kNear);
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
jmp(on_not_smi_result, near_jump);
bind(&smi_result);
} else {
j(not_zero, on_not_smi_result, near_jump);
}
if (!dst.is(src1) && src1.is(rax)) {
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
}
Integer32ToSmi(dst, rax);
}
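// For reference, the divisor checks above in portable C++; a sketch assuming
// 32-bit smi payloads (DivisionNeedsSlowPath is a hypothetical name).
//
//   #include <climits>
//   static bool DivisionNeedsSlowPath(int32_t dividend, int32_t divisor) {
//     // idivl faults on INT_MIN / -1; the code above overshoots and sends
//     // a kMinValue dividend with any negative divisor to the slow case.
//     return divisor == 0 || (dividend == INT_MIN && divisor < 0);
//   }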
@@ -1757,19 +2041,19 @@
Register src2,
Label* on_not_smi_result,
Label::Distance near_jump) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!src2.is(rax));
- ASSERT(!src2.is(rdx));
- ASSERT(!src1.is(rdx));
- ASSERT(!src1.is(src2));
+ DCHECK(!dst.is(kScratchRegister));
+ DCHECK(!src1.is(kScratchRegister));
+ DCHECK(!src2.is(kScratchRegister));
+ DCHECK(!src2.is(rax));
+ DCHECK(!src2.is(rdx));
+ DCHECK(!src1.is(rdx));
+ DCHECK(!src1.is(src2));
- testq(src2, src2);
+ testp(src2, src2);
j(zero, on_not_smi_result, near_jump);
if (src1.is(rax)) {
- movq(kScratchRegister, src1);
+ movp(kScratchRegister, src1);
}
SmiToInteger32(rax, src1);
SmiToInteger32(src2, src2);
@@ -1783,7 +2067,7 @@
// Retag inputs and go slow case.
Integer32ToSmi(src2, src2);
if (src1.is(rax)) {
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
}
jmp(on_not_smi_result, near_jump);
bind(&safe_div);
@@ -1794,14 +2078,14 @@
// Restore smi tags on inputs.
Integer32ToSmi(src2, src2);
if (src1.is(rax)) {
- movq(src1, kScratchRegister);
+ movp(src1, kScratchRegister);
}
// Check for a negative zero result. If the result is zero, and the
// dividend is negative, go slow to return a floating point negative zero.
Label smi_result;
testl(rdx, rdx);
j(not_zero, &smi_result, Label::kNear);
- testq(src1, src1);
+ testp(src1, src1);
j(negative, on_not_smi_result, near_jump);
bind(&smi_result);
Integer32ToSmi(dst, rdx);
@@ -1809,25 +2093,31 @@
void MacroAssembler::SmiNot(Register dst, Register src) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src.is(kScratchRegister));
- // Set tag and padding bits before negating, so that they are zero afterwards.
- movl(kScratchRegister, Immediate(~0));
- if (dst.is(src)) {
- xor_(dst, kScratchRegister);
+ DCHECK(!dst.is(kScratchRegister));
+ DCHECK(!src.is(kScratchRegister));
+ if (SmiValuesAre32Bits()) {
+ // Set tag and padding bits before negating, so that they are zero
+ // afterwards.
+ movl(kScratchRegister, Immediate(~0));
} else {
- lea(dst, Operand(src, kScratchRegister, times_1, 0));
+ DCHECK(SmiValuesAre31Bits());
+ movl(kScratchRegister, Immediate(1));
}
- not_(dst);
+ if (dst.is(src)) {
+ xorp(dst, kScratchRegister);
+ } else {
+ leap(dst, Operand(src, kScratchRegister, times_1, 0));
+ }
+ notp(dst);
}
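// For reference, the tag-preserving NOT in portable C++, shown for the
// 32-bit-smi layout where the payload occupies the upper 32 bits and the low
// 32 bits are zero (a sketch; SmiNot32 is a hypothetical name).
//
//   #include <cstdint>
//   static uint64_t SmiNot32(uint64_t smi) {
//     // Pre-set the tag and padding bits so that ~ leaves them zero.
//     return ~(smi | 0xFFFFFFFFull);
//   }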
void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
- ASSERT(!dst.is(src2));
+ DCHECK(!dst.is(src2));
if (!dst.is(src1)) {
- movq(dst, src1);
+ movp(dst, src1);
}
- and_(dst, src2);
+ andp(dst, src2);
}
@@ -1835,54 +2125,54 @@
if (constant->value() == 0) {
Set(dst, 0);
} else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- and_(dst, constant_reg);
+ andp(dst, constant_reg);
} else {
LoadSmiConstant(dst, constant);
- and_(dst, src);
+ andp(dst, src);
}
}
void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
- ASSERT(!src1.is(src2));
- movq(dst, src1);
+ DCHECK(!src1.is(src2));
+ movp(dst, src1);
}
- or_(dst, src2);
+ orp(dst, src2);
}
void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- or_(dst, constant_reg);
+ orp(dst, constant_reg);
} else {
LoadSmiConstant(dst, constant);
- or_(dst, src);
+ orp(dst, src);
}
}
void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
- ASSERT(!src1.is(src2));
- movq(dst, src1);
+ DCHECK(!src1.is(src2));
+ movp(dst, src1);
}
- xor_(dst, src2);
+ xorp(dst, src2);
}
void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- xor_(dst, constant_reg);
+ xorp(dst, constant_reg);
} else {
LoadSmiConstant(dst, constant);
- xor_(dst, src);
+ xorp(dst, src);
}
}
@@ -1890,11 +2180,11 @@
void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
Register src,
int shift_value) {
- ASSERT(is_uint5(shift_value));
+ DCHECK(is_uint5(shift_value));
if (shift_value > 0) {
if (dst.is(src)) {
- sar(dst, Immediate(shift_value + kSmiShift));
- shl(dst, Immediate(kSmiShift));
+ sarp(dst, Immediate(shift_value + kSmiShift));
+ shlp(dst, Immediate(kSmiShift));
} else {
UNIMPLEMENTED(); // Not used.
}
@@ -1904,12 +2194,27 @@
void MacroAssembler::SmiShiftLeftConstant(Register dst,
Register src,
- int shift_value) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
- if (shift_value > 0) {
- shl(dst, Immediate(shift_value));
+ int shift_value,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ if (SmiValuesAre32Bits()) {
+ if (!dst.is(src)) {
+ movp(dst, src);
+ }
+ if (shift_value > 0) {
+ // The shift amount is specified by the lower 5 bits, not 6 as in shlq.
+ shlq(dst, Immediate(shift_value & 0x1f));
+ }
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ if (dst.is(src)) {
+ UNIMPLEMENTED(); // Not used.
+ } else {
+ SmiToInteger32(dst, src);
+ shll(dst, Immediate(shift_value));
+ JumpIfNotValidSmiValue(dst, on_not_smi_result, near_jump);
+ Integer32ToSmi(dst, dst);
+ }
}
}
@@ -1921,29 +2226,73 @@
if (dst.is(src)) {
UNIMPLEMENTED(); // Not used.
} else {
- movq(dst, src);
if (shift_value == 0) {
- testq(dst, dst);
+ testp(src, src);
j(negative, on_not_smi_result, near_jump);
}
- shr(dst, Immediate(shift_value + kSmiShift));
- shl(dst, Immediate(kSmiShift));
+ if (SmiValuesAre32Bits()) {
+ movp(dst, src);
+ shrp(dst, Immediate(shift_value + kSmiShift));
+ shlp(dst, Immediate(kSmiShift));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ SmiToInteger32(dst, src);
+ shrp(dst, Immediate(shift_value));
+ JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump);
+ Integer32ToSmi(dst, dst);
+ }
}
}
void MacroAssembler::SmiShiftLeft(Register dst,
Register src1,
- Register src2) {
- ASSERT(!dst.is(rcx));
- // Untag shift amount.
- if (!dst.is(src1)) {
- movq(dst, src1);
+ Register src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ if (SmiValuesAre32Bits()) {
+ DCHECK(!dst.is(rcx));
+ if (!dst.is(src1)) {
+ movp(dst, src1);
+ }
+ // Untag shift amount.
+ SmiToInteger32(rcx, src2);
+ // The shift amount is specified by the lower 5 bits, not 6 as in shlq.
+ andp(rcx, Immediate(0x1f));
+ shlq_cl(dst);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ DCHECK(!dst.is(kScratchRegister));
+ DCHECK(!src1.is(kScratchRegister));
+ DCHECK(!src2.is(kScratchRegister));
+ DCHECK(!dst.is(src2));
+ DCHECK(!dst.is(rcx));
+
+ if (src1.is(rcx) || src2.is(rcx)) {
+ movq(kScratchRegister, rcx);
+ }
+ if (dst.is(src1)) {
+ UNIMPLEMENTED(); // Not used.
+ } else {
+ Label valid_result;
+ SmiToInteger32(dst, src1);
+ SmiToInteger32(rcx, src2);
+ shll_cl(dst);
+ JumpIfValidSmiValue(dst, &valid_result, Label::kNear);
+ // Since neither src1 nor src2 can be dst, clobbering dst does not
+ // destroy them, so no restore is needed.
+ if (src1.is(rcx) || src2.is(rcx)) {
+ if (src1.is(rcx)) {
+ movq(src1, kScratchRegister);
+ } else {
+ movq(src2, kScratchRegister);
+ }
+ }
+ jmp(on_not_smi_result, near_jump);
+ bind(&valid_result);
+ Integer32ToSmi(dst, dst);
+ }
}
- SmiToInteger32(rcx, src2);
- // Shift amount specified by lower 5 bits, not six as the shl opcode.
- and_(rcx, Immediate(0x1f));
- shl_cl(dst);
}
@@ -1952,36 +2301,34 @@
Register src2,
Label* on_not_smi_result,
Label::Distance near_jump) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(rcx));
- // dst and src1 can be the same, because the one case that bails out
- // is a shift by 0, which leaves dst, and therefore src1, unchanged.
+ DCHECK(!dst.is(kScratchRegister));
+ DCHECK(!src1.is(kScratchRegister));
+ DCHECK(!src2.is(kScratchRegister));
+ DCHECK(!dst.is(src2));
+ DCHECK(!dst.is(rcx));
if (src1.is(rcx) || src2.is(rcx)) {
movq(kScratchRegister, rcx);
}
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- SmiToInteger32(rcx, src2);
- orl(rcx, Immediate(kSmiShift));
- shr_cl(dst); // Shift is rcx modulo 0x1f + 32.
- shl(dst, Immediate(kSmiShift));
- testq(dst, dst);
- if (src1.is(rcx) || src2.is(rcx)) {
- Label positive_result;
- j(positive, &positive_result, Label::kNear);
- if (src1.is(rcx)) {
- movq(src1, kScratchRegister);
- } else {
- movq(src2, kScratchRegister);
- }
- jmp(on_not_smi_result, near_jump);
- bind(&positive_result);
+ if (dst.is(src1)) {
+ UNIMPLEMENTED(); // Not used.
} else {
- // src2 was zero and src1 negative.
- j(negative, on_not_smi_result, near_jump);
+ Label valid_result;
+ SmiToInteger32(dst, src1);
+ SmiToInteger32(rcx, src2);
+ shrl_cl(dst);
+ JumpIfUIntValidSmiValue(dst, &valid_result, Label::kNear);
+ // Since neither src1 nor src2 can be dst, clobbering dst does not
+ // destroy them, so no restore is needed.
+ if (src1.is(rcx) || src2.is(rcx)) {
+ if (src1.is(rcx)) {
+ movq(src1, kScratchRegister);
+ } else {
+ movq(src2, kScratchRegister);
+ }
+ }
+ jmp(on_not_smi_result, near_jump);
+ bind(&valid_result);
+ Integer32ToSmi(dst, dst);
}
}
@@ -1989,27 +2336,18 @@
void MacroAssembler::SmiShiftArithmeticRight(Register dst,
Register src1,
Register src2) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(rcx));
- if (src1.is(rcx)) {
- movq(kScratchRegister, src1);
- } else if (src2.is(rcx)) {
- movq(kScratchRegister, src2);
- }
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
+ DCHECK(!dst.is(kScratchRegister));
+ DCHECK(!src1.is(kScratchRegister));
+ DCHECK(!src2.is(kScratchRegister));
+ DCHECK(!dst.is(rcx));
+
SmiToInteger32(rcx, src2);
- orl(rcx, Immediate(kSmiShift));
- sar_cl(dst); // Shift 32 + original rcx & 0x1f.
- shl(dst, Immediate(kSmiShift));
- if (src1.is(rcx)) {
- movq(src1, kScratchRegister);
- } else if (src2.is(rcx)) {
- movq(src2, kScratchRegister);
+ if (!dst.is(src1)) {
+ movp(dst, src1);
}
+ SmiToInteger32(dst, dst);
+ sarl_cl(dst);
+ Integer32ToSmi(dst, dst);
}
@@ -2018,36 +2356,34 @@
Register src2,
Label* on_not_smis,
Label::Distance near_jump) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(src1));
- ASSERT(!dst.is(src2));
+ DCHECK(!dst.is(kScratchRegister));
+ DCHECK(!src1.is(kScratchRegister));
+ DCHECK(!src2.is(kScratchRegister));
+ DCHECK(!dst.is(src1));
+ DCHECK(!dst.is(src2));
// The operands must not both be smis.
#ifdef DEBUG
- if (allow_stub_calls()) { // Check contains a stub call.
- Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
- Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
- }
+ Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
+ Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
#endif
STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(0, Smi::FromInt(0));
+ DCHECK_EQ(0, Smi::FromInt(0));
movl(kScratchRegister, Immediate(kSmiTagMask));
- and_(kScratchRegister, src1);
+ andp(kScratchRegister, src1);
testl(kScratchRegister, src2);
// If non-zero, neither operand is a smi.
j(not_zero, on_not_smis, near_jump);
// Exactly one operand is a smi.
- ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
+ DCHECK_EQ(1, static_cast<int>(kSmiTagMask));
// kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one.
- subq(kScratchRegister, Immediate(1));
+ subp(kScratchRegister, Immediate(1));
// If src1 is a smi, the scratch register holds all 1s, else all 0s.
- movq(dst, src1);
- xor_(dst, src2);
- and_(dst, kScratchRegister);
+ movp(dst, src1);
+ xorp(dst, src2);
+ andp(dst, kScratchRegister);
// If src1 is a smi, dst holds src1 ^ src2, else it is zero.
- xor_(dst, src1);
+ xorp(dst, src1);
// If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
}
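// For reference, the branchless select above in portable C++ (a sketch
// assuming kSmiTagMask == 1 and kSmiTag == 0; the function name is
// hypothetical).
//
//   #include <cstdint>
//   static uint64_t SelectNonSmi(uint64_t src1, uint64_t src2) {
//     uint64_t mask = (src1 & 1) - 1;        // All 1s if src1 is a smi.
//     return ((src1 ^ src2) & mask) ^ src1;  // src2 if src1 is a smi, else src1.
//   }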
@@ -2055,41 +2391,212 @@
SmiIndex MacroAssembler::SmiToIndex(Register dst,
Register src,
int shift) {
- ASSERT(is_uint6(shift));
- // There is a possible optimization if shift is in the range 60-63, but that
- // will (and must) never happen.
- if (!dst.is(src)) {
- movq(dst, src);
- }
- if (shift < kSmiShift) {
- sar(dst, Immediate(kSmiShift - shift));
+ if (SmiValuesAre32Bits()) {
+ DCHECK(is_uint6(shift));
+ // There is a possible optimization if shift is in the range 60-63, but that
+ // will (and must) never happen.
+ if (!dst.is(src)) {
+ movp(dst, src);
+ }
+ if (shift < kSmiShift) {
+ sarp(dst, Immediate(kSmiShift - shift));
+ } else {
+ shlp(dst, Immediate(shift - kSmiShift));
+ }
+ return SmiIndex(dst, times_1);
} else {
- shl(dst, Immediate(shift - kSmiShift));
+ DCHECK(SmiValuesAre31Bits());
+ DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
+ if (!dst.is(src)) {
+ movp(dst, src);
+ }
+ // We have to sign-extend the index register to 64 bits, as the smi
+ // might be negative.
+ movsxlq(dst, dst);
+ if (shift == times_1) {
+ sarq(dst, Immediate(kSmiShift));
+ return SmiIndex(dst, times_1);
+ }
+ return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
}
- return SmiIndex(dst, times_1);
}
+
SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
Register src,
int shift) {
- // Register src holds a positive smi.
- ASSERT(is_uint6(shift));
- if (!dst.is(src)) {
- movq(dst, src);
- }
- neg(dst);
- if (shift < kSmiShift) {
- sar(dst, Immediate(kSmiShift - shift));
+ if (SmiValuesAre32Bits()) {
+ // Register src holds a positive smi.
+ DCHECK(is_uint6(shift));
+ if (!dst.is(src)) {
+ movp(dst, src);
+ }
+ negp(dst);
+ if (shift < kSmiShift) {
+ sarp(dst, Immediate(kSmiShift - shift));
+ } else {
+ shlp(dst, Immediate(shift - kSmiShift));
+ }
+ return SmiIndex(dst, times_1);
} else {
- shl(dst, Immediate(shift - kSmiShift));
+ DCHECK(SmiValuesAre31Bits());
+ DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
+ if (!dst.is(src)) {
+ movp(dst, src);
+ }
+ negq(dst);
+ if (shift == times_1) {
+ sarq(dst, Immediate(kSmiShift));
+ return SmiIndex(dst, times_1);
+ }
+ return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
}
- return SmiIndex(dst, times_1);
}
void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
- ASSERT_EQ(0, kSmiShift % kBitsPerByte);
- addl(dst, Operand(src, kSmiShift / kBitsPerByte));
+ if (SmiValuesAre32Bits()) {
+ DCHECK_EQ(0, kSmiShift % kBitsPerByte);
+ addl(dst, Operand(src, kSmiShift / kBitsPerByte));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ SmiToInteger32(kScratchRegister, src);
+ addl(dst, kScratchRegister);
+ }
+}
+
+
+void MacroAssembler::Push(Smi* source) {
+ intptr_t smi = reinterpret_cast<intptr_t>(source);
+ if (is_int32(smi)) {
+ Push(Immediate(static_cast<int32_t>(smi)));
+ } else {
+ Register constant = GetSmiConstant(source);
+ Push(constant);
+ }
+}
+
+
+void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
+ DCHECK(!src.is(scratch));
+ movp(scratch, src);
+ // High bits.
+ shrp(src, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
+ shlp(src, Immediate(kSmiShift));
+ Push(src);
+ // Low bits.
+ shlp(scratch, Immediate(kSmiShift));
+ Push(scratch);
+}
+
+
+void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
+ DCHECK(!dst.is(scratch));
+ Pop(scratch);
+ // Low bits.
+ shrp(scratch, Immediate(kSmiShift));
+ Pop(dst);
+ shrp(dst, Immediate(kSmiShift));
+ // High bits.
+ shlp(dst, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
+ orp(dst, scratch);
+}
+
+
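// For reference, the split/join done by PushRegisterAsTwoSmis and
// PopRegisterAsTwoSmis in portable C++ (a sketch assuming kSmiShift == 32,
// i.e. SmiValuesAre32Bits(); the names are hypothetical).
//
//   #include <cstdint>
//   static void SplitAsTwoSmis(uint64_t v, uint64_t* hi_smi, uint64_t* lo_smi) {
//     *hi_smi = (v >> 32) << 32;  // High half, tagged as a smi.
//     *lo_smi = v << 32;          // Low half, tagged as a smi.
//   }
//   static uint64_t JoinFromTwoSmis(uint64_t hi_smi, uint64_t lo_smi) {
//     return ((hi_smi >> 32) << 32) | (lo_smi >> 32);
//   }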
+void MacroAssembler::Test(const Operand& src, Smi* source) {
+ if (SmiValuesAre32Bits()) {
+ testl(Operand(src, kIntSize), Immediate(source->value()));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ testl(src, Immediate(source));
+ }
+}
+
+
+// ----------------------------------------------------------------------------
+
+
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* not_found) {
+ // Register usage: result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch1;
+ Register scratch = scratch2;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ SmiToInteger32(
+ mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+ shrl(mask, Immediate(1));
+ subp(mask, Immediate(1)); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label is_smi;
+ Label load_result_from_cache;
+ JumpIfSmi(object, &is_smi);
+ CheckMap(object,
+ isolate()->factory()->heap_number_map(),
+ not_found,
+ DONT_DO_SMI_CHECK);
+
+ STATIC_ASSERT(8 == kDoubleSize);
+ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+ xorp(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ andp(scratch, mask);
+ // Each entry in the string cache consists of two pointer-sized fields,
+ // but the times_twice_pointer_size (multiply by 16) scale factor is
+ // not supported by the addressing modes on x64.
+ // So we have to premultiply the entry index before the lookup.
+ shlp(scratch, Immediate(kPointerSizeLog2 + 1));
+
+ Register index = scratch;
+ Register probe = mask;
+ movp(probe,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+ ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
+ j(parity_even, not_found); // Bail out if NaN is involved.
+ j(not_equal, not_found); // The cache did not contain this value.
+ jmp(&load_result_from_cache);
+
+ bind(&is_smi);
+ SmiToInteger32(scratch, object);
+ andp(scratch, mask);
+ // Each entry in the string cache consists of two pointer-sized fields,
+ // but the times_twice_pointer_size (multiply by 16) scale factor is
+ // not supported by the addressing modes on x64.
+ // So we have to premultiply the entry index before the lookup.
+ shlp(scratch, Immediate(kPointerSizeLog2 + 1));
+
+ // Check if the entry is the smi we are looking for.
+ cmpp(object,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize));
+ j(not_equal, not_found);
+
+ // Get the result from the cache.
+ bind(&load_result_from_cache);
+ movp(result,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize + kPointerSize));
+ IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
}
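// For reference, the double-entry hash described above in portable C++ (a
// sketch; the function name is hypothetical, and mask is the cache length in
// entries minus one, as computed above).
//
//   #include <cstdint>
//   #include <cstring>
//   static uint32_t NumberStringCacheHash(double value, uint32_t mask) {
//     uint64_t bits;
//     std::memcpy(&bits, &value, sizeof bits);  // Raw IEEE-754 bits.
//     return (static_cast<uint32_t>(bits) ^
//             static_cast<uint32_t>(bits >> 32)) & mask;
//   }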
@@ -2104,211 +2611,339 @@
}
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
- Register first_object,
- Register second_object,
- Register scratch1,
- Register scratch2,
- Label* on_fail,
- Label::Distance near_jump) {
+void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(
+ Register first_object, Register second_object, Register scratch1,
+ Register scratch2, Label* on_fail, Label::Distance near_jump) {
// Check that both objects are not smis.
Condition either_smi = CheckEitherSmi(first_object, second_object);
j(either_smi, on_fail, near_jump);
// Load instance type for both strings.
- movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
- movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
+ movp(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
+ movp(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
- // Check that both are flat ASCII strings.
- ASSERT(kNotStringTag != 0);
- const int kFlatAsciiStringMask =
+ // Check that both are flat one-byte strings.
+ DCHECK(kNotStringTag != 0);
+ const int kFlatOneByteStringMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ const int kFlatOneByteStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
- andl(scratch1, Immediate(kFlatAsciiStringMask));
- andl(scratch2, Immediate(kFlatAsciiStringMask));
+ andl(scratch1, Immediate(kFlatOneByteStringMask));
+ andl(scratch2, Immediate(kFlatOneByteStringMask));
// Interleave the bits to check both scratch1 and scratch2 in one test.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
+ leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
cmpl(scratch1,
- Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+ Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
j(not_equal, on_fail, near_jump);
}
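// For reference, the single-comparison check above in portable C++ (a sketch;
// the name is hypothetical). It relies on (mask & (mask << 3)) == 0, so the
// two masked fields cannot carry into each other.
//
//   #include <cstdint>
//   static bool BothFlatOneByte(uint32_t type1, uint32_t type2,
//                               uint32_t mask, uint32_t tag) {
//     return ((type1 & mask) + ((type2 & mask) << 3)) == tag + (tag << 3);
//   }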
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
- Register instance_type,
- Register scratch,
- Label* failure,
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
+ Register instance_type, Register scratch, Label* failure,
Label::Distance near_jump) {
if (!scratch.is(instance_type)) {
movl(scratch, instance_type);
}
- const int kFlatAsciiStringMask =
+ const int kFlatOneByteStringMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- andl(scratch, Immediate(kFlatAsciiStringMask));
- cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
+ andl(scratch, Immediate(kFlatOneByteStringMask));
+ cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
j(not_equal, failure, near_jump);
}
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first_object_instance_type,
- Register second_object_instance_type,
- Register scratch1,
- Register scratch2,
- Label* on_fail,
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
+ Register first_object_instance_type, Register second_object_instance_type,
+ Register scratch1, Register scratch2, Label* on_fail,
Label::Distance near_jump) {
// Load instance type for both strings.
- movq(scratch1, first_object_instance_type);
- movq(scratch2, second_object_instance_type);
+ movp(scratch1, first_object_instance_type);
+ movp(scratch2, second_object_instance_type);
- // Check that both are flat ASCII strings.
- ASSERT(kNotStringTag != 0);
- const int kFlatAsciiStringMask =
+ // Check that both are flat one-byte strings.
+ DCHECK(kNotStringTag != 0);
+ const int kFlatOneByteStringMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ const int kFlatOneByteStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
- andl(scratch1, Immediate(kFlatAsciiStringMask));
- andl(scratch2, Immediate(kFlatAsciiStringMask));
+ andl(scratch1, Immediate(kFlatOneByteStringMask));
+ andl(scratch2, Immediate(kFlatOneByteStringMask));
// Interleave the bits to check both scratch1 and scratch2 in one test.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
+ leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
cmpl(scratch1,
- Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+ Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
j(not_equal, on_fail, near_jump);
}
+template<class T>
+static void JumpIfNotUniqueNameHelper(MacroAssembler* masm,
+ T operand_or_register,
+ Label* not_unique_name,
+ Label::Distance distance) {
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ Label succeed;
+ masm->testb(operand_or_register,
+ Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+ masm->j(zero, &succeed, Label::kNear);
+ masm->cmpb(operand_or_register, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
+ masm->j(not_equal, not_unique_name, distance);
+
+ masm->bind(&succeed);
+}
+
+
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
+ Label* not_unique_name,
+ Label::Distance distance) {
+ JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance);
+}
+
+
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
+ Label* not_unique_name,
+ Label::Distance distance) {
+ JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
+}
+
void MacroAssembler::Move(Register dst, Register src) {
if (!dst.is(src)) {
- movq(dst, src);
+ movp(dst, src);
}
}
void MacroAssembler::Move(Register dst, Handle<Object> source) {
- ASSERT(!source->IsFailure());
+ AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
Move(dst, Smi::cast(*source));
} else {
- movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
+ MoveHeapObject(dst, source);
}
}
void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
- ASSERT(!source->IsFailure());
+ AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
Move(dst, Smi::cast(*source));
} else {
- movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
- movq(dst, kScratchRegister);
+ MoveHeapObject(kScratchRegister, source);
+ movp(dst, kScratchRegister);
}
}
void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
+ AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
Cmp(dst, Smi::cast(*source));
} else {
- Move(kScratchRegister, source);
- cmpq(dst, kScratchRegister);
+ MoveHeapObject(kScratchRegister, source);
+ cmpp(dst, kScratchRegister);
}
}
void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
+ AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
Cmp(dst, Smi::cast(*source));
} else {
- ASSERT(source->IsHeapObject());
- movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
- cmpq(dst, kScratchRegister);
+ MoveHeapObject(kScratchRegister, source);
+ cmpp(dst, kScratchRegister);
}
}
void MacroAssembler::Push(Handle<Object> source) {
+ AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
Push(Smi::cast(*source));
} else {
- ASSERT(source->IsHeapObject());
- movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
- push(kScratchRegister);
+ MoveHeapObject(kScratchRegister, source);
+ Push(kScratchRegister);
}
}
-void MacroAssembler::LoadHeapObject(Register result,
- Handle<HeapObject> object) {
+void MacroAssembler::MoveHeapObject(Register result,
+ Handle<Object> object) {
+ AllowDeferredHandleDereference using_raw_address;
+ DCHECK(object->IsHeapObject());
if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- movq(result, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
- movq(result, Operand(result, 0));
+ Handle<Cell> cell = isolate()->factory()->NewCell(object);
+ Move(result, cell, RelocInfo::CELL);
+ movp(result, Operand(result, 0));
} else {
- Move(result, object);
+ Move(result, object, RelocInfo::EMBEDDED_OBJECT);
}
}
-void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
- movq(kScratchRegister, Operand(kScratchRegister, 0));
- push(kScratchRegister);
- } else {
- Push(object);
- }
-}
-
-
-void MacroAssembler::LoadGlobalCell(Register dst,
- Handle<JSGlobalPropertyCell> cell) {
+void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
if (dst.is(rax)) {
- load_rax(cell.location(), RelocInfo::GLOBAL_PROPERTY_CELL);
+ AllowDeferredHandleDereference embedding_raw_address;
+ load_rax(cell.location(), RelocInfo::CELL);
} else {
- movq(dst, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
- movq(dst, Operand(dst, 0));
- }
-}
-
-
-void MacroAssembler::Push(Smi* source) {
- intptr_t smi = reinterpret_cast<intptr_t>(source);
- if (is_int32(smi)) {
- push(Immediate(static_cast<int32_t>(smi)));
- } else {
- Register constant = GetSmiConstant(source);
- push(constant);
+ Move(dst, cell, RelocInfo::CELL);
+ movp(dst, Operand(dst, 0));
}
}
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
- addq(rsp, Immediate(stack_elements * kPointerSize));
+ addp(rsp, Immediate(stack_elements * kPointerSize));
}
}
-void MacroAssembler::Test(const Operand& src, Smi* source) {
- testl(Operand(src, kIntSize), Immediate(source->value()));
+void MacroAssembler::DropUnderReturnAddress(int stack_elements,
+ Register scratch) {
+ DCHECK(stack_elements > 0);
+ if (kPointerSize == kInt64Size && stack_elements == 1) {
+ popq(MemOperand(rsp, 0));
+ return;
+ }
+
+ PopReturnAddressTo(scratch);
+ Drop(stack_elements);
+ PushReturnAddressFrom(scratch);
}
-void MacroAssembler::TestBit(const Operand& src, int bits) {
+void MacroAssembler::Push(Register src) {
+ if (kPointerSize == kInt64Size) {
+ pushq(src);
+ } else {
+ // x32 uses 64-bit push for rbp in the prologue.
+ DCHECK(src.code() != rbp.code());
+ leal(rsp, Operand(rsp, -4));
+ movp(Operand(rsp, 0), src);
+ }
+}
+
+
+void MacroAssembler::Push(const Operand& src) {
+ if (kPointerSize == kInt64Size) {
+ pushq(src);
+ } else {
+ movp(kScratchRegister, src);
+ leal(rsp, Operand(rsp, -4));
+ movp(Operand(rsp, 0), kScratchRegister);
+ }
+}
+
+
+void MacroAssembler::PushQuad(const Operand& src) {
+ if (kPointerSize == kInt64Size) {
+ pushq(src);
+ } else {
+ movp(kScratchRegister, src);
+ pushq(kScratchRegister);
+ }
+}
+
+
+void MacroAssembler::Push(Immediate value) {
+ if (kPointerSize == kInt64Size) {
+ pushq(value);
+ } else {
+ leal(rsp, Operand(rsp, -4));
+ movp(Operand(rsp, 0), value);
+ }
+}
+
+
+void MacroAssembler::PushImm32(int32_t imm32) {
+ if (kPointerSize == kInt64Size) {
+ pushq_imm32(imm32);
+ } else {
+ leal(rsp, Operand(rsp, -4));
+ movp(Operand(rsp, 0), Immediate(imm32));
+ }
+}
+
+
+void MacroAssembler::Pop(Register dst) {
+ if (kPointerSize == kInt64Size) {
+ popq(dst);
+ } else {
+ // x32 uses 64-bit pop for rbp in the epilogue.
+ DCHECK(dst.code() != rbp.code());
+ movp(dst, Operand(rsp, 0));
+ leal(rsp, Operand(rsp, 4));
+ }
+}
+
+
+void MacroAssembler::Pop(const Operand& dst) {
+ if (kPointerSize == kInt64Size) {
+ popq(dst);
+ } else {
+ Register scratch = dst.AddressUsesRegister(kScratchRegister)
+ ? kSmiConstantRegister : kScratchRegister;
+ movp(scratch, Operand(rsp, 0));
+ movp(dst, scratch);
+ leal(rsp, Operand(rsp, 4));
+ if (scratch.is(kSmiConstantRegister)) {
+ // Restore kSmiConstantRegister.
+ movp(kSmiConstantRegister,
+ reinterpret_cast<void*>(Smi::FromInt(kSmiConstantRegisterValue)),
+ Assembler::RelocInfoNone());
+ }
+ }
+}
+
+
+void MacroAssembler::PopQuad(const Operand& dst) {
+ if (kPointerSize == kInt64Size) {
+ popq(dst);
+ } else {
+ popq(kScratchRegister);
+ movp(dst, kScratchRegister);
+ }
+}
+
+
+void MacroAssembler::LoadSharedFunctionInfoSpecialField(Register dst,
+ Register base,
+ int offset) {
+ DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
+ offset <= SharedFunctionInfo::kSize &&
+ (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
+ if (kPointerSize == kInt64Size) {
+ movsxlq(dst, FieldOperand(base, offset));
+ } else {
+ movp(dst, FieldOperand(base, offset));
+ SmiToInteger32(dst, dst);
+ }
+}
+
+
+void MacroAssembler::TestBitSharedFunctionInfoSpecialField(Register base,
+ int offset,
+ int bits) {
+ DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
+ offset <= SharedFunctionInfo::kSize &&
+ (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
+ if (kPointerSize == kInt32Size) {
+ // On x32, this field is represented as a smi.
+ bits += kSmiShift;
+ }
int byte_offset = bits / kBitsPerByte;
int bit_in_byte = bits & (kBitsPerByte - 1);
- testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte));
+ testb(FieldOperand(base, offset + byte_offset), Immediate(1 << bit_in_byte));
}
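// For reference, the byte-granular bit test above in portable C++ (a sketch;
// the name is hypothetical).
//
//   #include <cstdint>
//   static bool TestBit(const uint8_t* base, int bits) {
//     int byte_offset = bits / 8;  // kBitsPerByte == 8.
//     int bit_in_byte = bits & 7;
//     return (base[byte_offset] & (1 << bit_in_byte)) != 0;
//   }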
@@ -2318,8 +2953,18 @@
}
+void MacroAssembler::Jump(const Operand& op) {
+ if (kPointerSize == kInt64Size) {
+ jmp(op);
+ } else {
+ movp(kScratchRegister, op);
+ jmp(kScratchRegister);
+ }
+}
+
+
void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
- movq(kScratchRegister, destination, rmode);
+ Move(kScratchRegister, destination, rmode);
jmp(kScratchRegister);
}
@@ -2332,8 +2977,8 @@
int MacroAssembler::CallSize(ExternalReference ext) {
// Opcode for call kScratchRegister is: Rex.B FF D4 (three bytes).
- const int kCallInstructionSize = 3;
- return LoadAddressSize(ext) + kCallInstructionSize;
+ return LoadAddressSize(ext) +
+ Assembler::kCallScratchRegisterInstructionLength;
}
@@ -2349,11 +2994,21 @@
}
+void MacroAssembler::Call(const Operand& op) {
+ if (kPointerSize == kInt64Size) {
+ call(op);
+ } else {
+ movp(kScratchRegister, op);
+ call(kScratchRegister);
+ }
+}
+
+
void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
#ifdef DEBUG
- int end_position = pc_offset() + CallSize(destination, rmode);
+ int end_position = pc_offset() + CallSize(destination);
#endif
- movq(kScratchRegister, destination, rmode);
+ Move(kScratchRegister, destination, rmode);
call(kScratchRegister);
#ifdef DEBUG
CHECK_EQ(pc_offset(), end_position);
@@ -2363,11 +3018,12 @@
void MacroAssembler::Call(Handle<Code> code_object,
RelocInfo::Mode rmode,
- unsigned ast_id) {
+ TypeFeedbackId ast_id) {
#ifdef DEBUG
int end_position = pc_offset() + CallSize(code_object);
#endif
- ASSERT(RelocInfo::IsCodeTarget(rmode));
+ DCHECK(RelocInfo::IsCodeTarget(rmode) ||
+ rmode == RelocInfo::CODE_AGE_SEQUENCE);
call(code_object, rmode, ast_id);
#ifdef DEBUG
CHECK_EQ(end_position, pc_offset());
@@ -2376,26 +3032,26 @@
void MacroAssembler::Pushad() {
- push(rax);
- push(rcx);
- push(rdx);
- push(rbx);
+ Push(rax);
+ Push(rcx);
+ Push(rdx);
+ Push(rbx);
// Not pushing rsp or rbp.
- push(rsi);
- push(rdi);
- push(r8);
- push(r9);
+ Push(rsi);
+ Push(rdi);
+ Push(r8);
+ Push(r9);
// r10 is kScratchRegister.
- push(r11);
+ Push(r11);
// r12 is kSmiConstantRegister.
// r13 is kRootRegister.
- push(r14);
- push(r15);
+ Push(r14);
+ Push(r15);
STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
// Use lea for symmetry with Popad.
int sp_delta =
(kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
- lea(rsp, Operand(rsp, -sp_delta));
+ leap(rsp, Operand(rsp, -sp_delta));
}
@@ -2403,23 +3059,23 @@
// Popad must not change the flags, so use lea instead of addq.
int sp_delta =
(kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
- lea(rsp, Operand(rsp, sp_delta));
- pop(r15);
- pop(r14);
- pop(r11);
- pop(r9);
- pop(r8);
- pop(rdi);
- pop(rsi);
- pop(rbx);
- pop(rdx);
- pop(rcx);
- pop(rax);
+ leap(rsp, Operand(rsp, sp_delta));
+ Pop(r15);
+ Pop(r14);
+ Pop(r11);
+ Pop(r9);
+ Pop(r8);
+ Pop(rdi);
+ Pop(rsi);
+ Pop(rbx);
+ Pop(rdx);
+ Pop(rcx);
+ Pop(rax);
}
void MacroAssembler::Dropad() {
- addq(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
+ addp(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
}
@@ -2446,13 +3102,19 @@
};
+void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
+ const Immediate& imm) {
+ movp(SafepointRegisterSlot(dst), imm);
+}
+
+
void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
- movq(SafepointRegisterSlot(dst), src);
+ movp(SafepointRegisterSlot(dst), src);
}
void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
- movq(dst, SafepointRegisterSlot(src));
+ movp(dst, SafepointRegisterSlot(src));
}
@@ -2464,7 +3126,8 @@
void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
int handler_index) {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
+ kFPOnStackSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
@@ -2477,33 +3140,33 @@
// The frame pointer does not point to a JS frame so we save NULL for
// rbp. We expect the code throwing an exception to check rbp before
// dereferencing it to restore the context.
- push(Immediate(0)); // NULL frame pointer.
+ pushq(Immediate(0)); // NULL frame pointer.
Push(Smi::FromInt(0)); // No context.
} else {
- push(rbp);
- push(rsi);
+ pushq(rbp);
+ Push(rsi);
}
// Push the state and the code object.
unsigned state =
StackHandler::IndexField::encode(handler_index) |
StackHandler::KindField::encode(kind);
- push(Immediate(state));
+ Push(Immediate(state));
Push(CodeObject());
// Link the current handler as the next handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- push(ExternalOperand(handler_address));
+ Push(ExternalOperand(handler_address));
// Set this new handler as the current one.
- movq(ExternalOperand(handler_address), rsp);
+ movp(ExternalOperand(handler_address), rsp);
}
void MacroAssembler::PopTryHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- pop(ExternalOperand(handler_address));
- addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+ Pop(ExternalOperand(handler_address));
+ addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
}
@@ -2511,18 +3174,20 @@
// Compute the handler entry address and jump to it. The handler table is
// a fixed array of (smi-tagged) code offsets.
// rax = exception, rdi = code object, rdx = state.
- movq(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
- shr(rdx, Immediate(StackHandler::kKindWidth));
- movq(rdx, FieldOperand(rbx, rdx, times_8, FixedArray::kHeaderSize));
+ movp(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
+ shrp(rdx, Immediate(StackHandler::kKindWidth));
+ movp(rdx,
+ FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
SmiToInteger64(rdx, rdx);
- lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
+ leap(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
jmp(rdi);
}
void MacroAssembler::Throw(Register value) {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
+ kFPOnStackSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
@@ -2531,29 +3196,29 @@
// The exception is expected in rax.
if (!value.is(rax)) {
- movq(rax, value);
+ movp(rax, value);
}
// Drop the stack pointer to the top of the top handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- movq(rsp, ExternalOperand(handler_address));
+ movp(rsp, ExternalOperand(handler_address));
// Restore the next handler.
- pop(ExternalOperand(handler_address));
+ Pop(ExternalOperand(handler_address));
// Remove the code object and state, compute the handler address in rdi.
- pop(rdi); // Code object.
- pop(rdx); // Offset and state.
+ Pop(rdi); // Code object.
+ Pop(rdx); // Offset and state.
// Restore the context and frame pointer.
- pop(rsi); // Context.
- pop(rbp); // Frame pointer.
+ Pop(rsi); // Context.
+ popq(rbp); // Frame pointer.
// If the handler is a JS frame, restore the context to the frame.
// (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
// rbp or rsi.
Label skip;
- testq(rsi, rsi);
+ testp(rsi, rsi);
j(zero, &skip, Label::kNear);
- movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
bind(&skip);
JumpToHandlerEntry();
@@ -2562,7 +3227,8 @@
void MacroAssembler::ThrowUncatchable(Register value) {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
+ kFPOnStackSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
@@ -2571,7 +3237,7 @@
// The exception is expected in rax.
if (!value.is(rax)) {
- movq(rax, value);
+ movp(rax, value);
}
// Drop the stack pointer to the top of the top stack handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
@@ -2581,7 +3247,7 @@
Label fetch_next, check_kind;
jmp(&check_kind, Label::kNear);
bind(&fetch_next);
- movq(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
+ movp(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
bind(&check_kind);
STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
@@ -2590,15 +3256,15 @@
j(not_zero, &fetch_next);
// Set the top handler address to next handler past the top ENTRY handler.
- pop(ExternalOperand(handler_address));
+ Pop(ExternalOperand(handler_address));
// Remove the code object and state, compute the handler address in rdi.
- pop(rdi); // Code object.
- pop(rdx); // Offset and state.
+ Pop(rdi); // Code object.
+ Pop(rdx); // Offset and state.
// Clear the context pointer and frame pointer (0 was saved in the handler).
- pop(rsi);
- pop(rbp);
+ Pop(rsi);
+ popq(rbp);
JumpToHandlerEntry();
}
@@ -2613,9 +3279,9 @@
if (is_uint16(bytes_dropped)) {
ret(bytes_dropped);
} else {
- pop(scratch);
- addq(rsp, Immediate(bytes_dropped));
- push(scratch);
+ PopReturnAddressTo(scratch);
+ addp(rsp, Immediate(bytes_dropped));
+ PushReturnAddressFrom(scratch);
ret(0);
}
}
@@ -2630,7 +3296,7 @@
void MacroAssembler::CmpObjectType(Register heap_object,
InstanceType type,
Register map) {
- movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
CmpInstanceType(map, type);
}
@@ -2644,10 +3310,12 @@
void MacroAssembler::CheckFastElements(Register map,
Label* fail,
Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
- STATIC_ASSERT(FAST_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastElementValue));
+ Immediate(Map::kMaximumBitField2FastHoleyElementValue));
j(above, fail, distance);
}
@@ -2655,23 +3323,26 @@
void MacroAssembler::CheckFastObjectElements(Register map,
Label* fail,
Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
- STATIC_ASSERT(FAST_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
+ Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
j(below_equal, fail, distance);
cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastElementValue));
+ Immediate(Map::kMaximumBitField2FastHoleyElementValue));
j(above, fail, distance);
}
-void MacroAssembler::CheckFastSmiOnlyElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+void MacroAssembler::CheckFastSmiElements(Register map,
+ Label* fail,
+ Label::Distance distance) {
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
+ Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
j(above, fail, distance);
}
@@ -2681,7 +3352,8 @@
Register elements,
Register index,
XMMRegister xmm_scratch,
- Label* fail) {
+ Label* fail,
+ int elements_offset) {
Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
JumpIfSmi(maybe_number, &smi_value, Label::kNear);
@@ -2700,7 +3372,8 @@
bind(¬_nan);
movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
bind(&have_double_value);
- movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
+ movsd(FieldOperand(elements, index, times_8,
+ FixedDoubleArray::kHeaderSize - elements_offset),
xmm_scratch);
jmp(&done);
@@ -2713,8 +3386,9 @@
bind(&is_nan);
// Convert all NaNs to the same canonical NaN value when they are stored in
// the double array.
- Set(kScratchRegister, BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+ Set(kScratchRegister,
+ bit_cast<uint64_t>(
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
movq(xmm_scratch, kScratchRegister);
jmp(&have_double_value, Label::kNear);
@@ -2722,55 +3396,29 @@
// Value is a smi. Convert to a double and store.
// Preserve original value.
SmiToInteger32(kScratchRegister, maybe_number);
- cvtlsi2sd(xmm_scratch, kScratchRegister);
- movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
+ Cvtlsi2sd(xmm_scratch, kScratchRegister);
+ movsd(FieldOperand(elements, index, times_8,
+ FixedDoubleArray::kHeaderSize - elements_offset),
xmm_scratch);
bind(&done);
}
-void MacroAssembler::CompareMap(Register obj,
- Handle<Map> map,
- Label* early_success,
- CompareMapMode mode) {
+void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
- if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
- Map* transitioned_fast_element_map(
- map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
- ASSERT(transitioned_fast_element_map == NULL ||
- map->elements_kind() != FAST_ELEMENTS);
- if (transitioned_fast_element_map != NULL) {
- j(equal, early_success, Label::kNear);
- Cmp(FieldOperand(obj, HeapObject::kMapOffset),
- Handle<Map>(transitioned_fast_element_map));
- }
-
- Map* transitioned_double_map(
- map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
- ASSERT(transitioned_double_map == NULL ||
- map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
- if (transitioned_double_map != NULL) {
- j(equal, early_success, Label::kNear);
- Cmp(FieldOperand(obj, HeapObject::kMapOffset),
- Handle<Map>(transitioned_double_map));
- }
- }
}
void MacroAssembler::CheckMap(Register obj,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode) {
+ SmiCheckType smi_check_type) {
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, fail);
}
- Label success;
- CompareMap(obj, map, &success, mode);
+ CompareMap(obj, map);
j(not_equal, fail);
- bind(&success);
}
@@ -2786,37 +3434,138 @@
void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
XMMRegister temp_xmm_reg,
- Register result_reg,
- Register temp_reg) {
+ Register result_reg) {
Label done;
- Set(result_reg, 0);
+ Label conv_failure;
xorps(temp_xmm_reg, temp_xmm_reg);
- ucomisd(input_reg, temp_xmm_reg);
- j(below, &done, Label::kNear);
- uint64_t one_half = BitCast<uint64_t, double>(0.5);
- Set(temp_reg, one_half);
- movq(temp_xmm_reg, temp_reg);
- addsd(temp_xmm_reg, input_reg);
- cvttsd2si(result_reg, temp_xmm_reg);
+ cvtsd2si(result_reg, input_reg);
testl(result_reg, Immediate(0xFFFFFF00));
j(zero, &done, Label::kNear);
+ cmpl(result_reg, Immediate(1));
+ j(overflow, &conv_failure, Label::kNear);
+ movl(result_reg, Immediate(0));
+ setcc(sign, result_reg);
+ subl(result_reg, Immediate(1));
+ andl(result_reg, Immediate(255));
+ jmp(&done, Label::kNear);
+ bind(&conv_failure);
+ Set(result_reg, 0);
+ ucomisd(input_reg, temp_xmm_reg);
+ j(below, &done, Label::kNear);
Set(result_reg, 255);
bind(&done);
}
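// A C++ sketch of the fast clamp above, assuming cvtsd2si's round-to-
// nearest mode and its 0x80000000 "integer indefinite" result on NaN or
// out-of-range input:
//
//   int32_t r = lrint(input);                // cvtsd2si
//   if ((r & 0xFFFFFF00) == 0) return r;     // already in [0, 255]
//   if (r == INT32_MIN)                      // conversion failed
//     return input >= 0.0 ? 255 : 0;         // NaN fails the compare -> 0
//   return r < 1 ? 0 : 255;                  // here r < 0 or r > 255
//
// The setcc/sub/and sequence computes that last line branch-free: r < 1
// yields ((1 - 1) & 255) == 0, otherwise ((0 - 1) & 255) == 255.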
+void MacroAssembler::LoadUint32(XMMRegister dst,
+ Register src) {
+ if (FLAG_debug_code) {
+ cmpq(src, Immediate(0xffffffff));
+ Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
+ }
+ cvtqsi2sd(dst, src);
+}
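// The signed 64-bit convert is exact here: with the upper 32 bits of src
// known to be zero, every value in [0, 2^32) is a non-negative int64, and
// doubles represent all integers up to 2^53 without rounding. In effect:
//
//   double d = static_cast<double>(static_cast<int64_t>(src));  // cvtqsi2sd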
+
+
+void MacroAssembler::SlowTruncateToI(Register result_reg,
+ Register input_reg,
+ int offset) {
+ DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
+ call(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
+ Register input_reg) {
+ Label done;
+ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ cvttsd2siq(result_reg, xmm0);
+ cmpq(result_reg, Immediate(1));
+ j(no_overflow, &done, Label::kNear);
+
+ // Slow case.
+ if (input_reg.is(result_reg)) {
+ subp(rsp, Immediate(kDoubleSize));
+ movsd(MemOperand(rsp, 0), xmm0);
+ SlowTruncateToI(result_reg, rsp, 0);
+ addp(rsp, Immediate(kDoubleSize));
+ } else {
+ SlowTruncateToI(result_reg, input_reg);
+ }
+
+ bind(&done);
+ // Keep our invariant that the upper 32 bits are zero.
+ movl(result_reg, result_reg);
+}
+
+
+void MacroAssembler::TruncateDoubleToI(Register result_reg,
+ XMMRegister input_reg) {
+ Label done;
+ cvttsd2siq(result_reg, input_reg);
+ cmpq(result_reg, Immediate(1));
+ j(no_overflow, &done, Label::kNear);
+
+ subp(rsp, Immediate(kDoubleSize));
+ movsd(MemOperand(rsp, 0), input_reg);
+ SlowTruncateToI(result_reg, rsp, 0);
+ addp(rsp, Immediate(kDoubleSize));
+
+ bind(&done);
+ // Keep our invariant that the upper 32 bits are zero.
+ movl(result_reg, result_reg);
+}
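// Both truncations lean on cvttsd2siq's failure behavior: for NaN or
// out-of-range input the hardware produces the "integer indefinite" value
// 0x8000000000000000 (INT64_MIN). cmpq(result, 1) computes result - 1, and
// that subtraction overflows only for INT64_MIN, so j(no_overflow) selects
// exactly the successful conversions. Roughly (hardware semantics; the
// equivalent C++ cast would be undefined for these inputs):
//
//   int64_t r = cvttsd2siq(value);
//   if (r != INT64_MIN) return static_cast<uint32_t>(r);  // fast path
//   // slow path: spill the double to the stack, call DoubleToIStub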
+
+
+void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
+ XMMRegister scratch,
+ MinusZeroMode minus_zero_mode,
+ Label* lost_precision, Label* is_nan,
+ Label* minus_zero, Label::Distance dst) {
+ cvttsd2si(result_reg, input_reg);
+ Cvtlsi2sd(xmm0, result_reg);
+ ucomisd(xmm0, input_reg);
+ j(not_equal, lost_precision, dst);
+ j(parity_even, is_nan, dst); // NaN.
+ if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
+ Label done;
+ // The integer converted back is equal to the original. We
+ // only have to test if we got -0 as an input.
+ testl(result_reg, result_reg);
+ j(not_zero, &done, Label::kNear);
+ movmskpd(result_reg, input_reg);
+ // Bit 0 contains the sign of the double in input_reg.
+ // If input was positive, we are ok and return 0, otherwise
+ // jump to minus_zero.
+ andl(result_reg, Immediate(1));
+ j(not_zero, minus_zero, dst);
+ bind(&done);
+ }
+}
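// Truncation maps both +0.0 and -0.0 to integer 0, so FAIL_ON_MINUS_ZERO
// re-inspects the original double: movmskpd packs the sign bits of the two
// xmm lanes into the low bits of a GPR, and bit 0 is input_reg's sign.
// Equivalent C++:
//
//   if (result == 0 && std::signbit(input)) goto minus_zero;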
+
+
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
- movq(descriptors, FieldOperand(map,
- Map::kInstanceDescriptorsOrBitField3Offset));
- Label not_smi;
- JumpIfNotSmi(descriptors, &not_smi, Label::kNear);
- Move(descriptors, isolate()->factory()->empty_descriptor_array());
- bind(&not_smi);
+ movp(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
+}
+
+
+void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
+ movl(dst, FieldOperand(map, Map::kBitField3Offset));
+ DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
+}
+
+
+void MacroAssembler::EnumLength(Register dst, Register map) {
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+ movl(dst, FieldOperand(map, Map::kBitField3Offset));
+ andl(dst, Immediate(Map::EnumLengthBits::kMask));
+ Integer32ToSmi(dst, dst);
}
void MacroAssembler::DispatchMap(Register obj,
+ Register unused,
Handle<Map> map,
Handle<Code> success,
SmiCheckType smi_check_type) {
@@ -2831,61 +3580,101 @@
}
-void MacroAssembler::AbortIfNotNumber(Register object) {
- Label ok;
- Condition is_smi = CheckSmi(object);
- j(is_smi, &ok, Label::kNear);
- Cmp(FieldOperand(object, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- Assert(equal, "Operand not a number");
- bind(&ok);
+void MacroAssembler::AssertNumber(Register object) {
+ if (emit_debug_code()) {
+ Label ok;
+ Condition is_smi = CheckSmi(object);
+ j(is_smi, &ok, Label::kNear);
+ Cmp(FieldOperand(object, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ Check(equal, kOperandIsNotANumber);
+ bind(&ok);
+ }
}
-void MacroAssembler::AbortIfSmi(Register object) {
- Condition is_smi = CheckSmi(object);
- Assert(NegateCondition(is_smi), "Operand is a smi");
+void MacroAssembler::AssertNotSmi(Register object) {
+ if (emit_debug_code()) {
+ Condition is_smi = CheckSmi(object);
+ Check(NegateCondition(is_smi), kOperandIsASmi);
+ }
}
-void MacroAssembler::AbortIfNotSmi(Register object) {
- Condition is_smi = CheckSmi(object);
- Assert(is_smi, "Operand is not a smi");
+void MacroAssembler::AssertSmi(Register object) {
+ if (emit_debug_code()) {
+ Condition is_smi = CheckSmi(object);
+ Check(is_smi, kOperandIsNotASmi);
+ }
}
-void MacroAssembler::AbortIfNotSmi(const Operand& object) {
- Condition is_smi = CheckSmi(object);
- Assert(is_smi, "Operand is not a smi");
+void MacroAssembler::AssertSmi(const Operand& object) {
+ if (emit_debug_code()) {
+ Condition is_smi = CheckSmi(object);
+ Check(is_smi, kOperandIsNotASmi);
+ }
}
-void MacroAssembler::AbortIfNotZeroExtended(Register int32_register) {
- ASSERT(!int32_register.is(kScratchRegister));
- movq(kScratchRegister, 0x100000000l, RelocInfo::NONE);
- cmpq(kScratchRegister, int32_register);
- Assert(above_equal, "32 bit value in register is not zero-extended");
+void MacroAssembler::AssertZeroExtended(Register int32_register) {
+ if (emit_debug_code()) {
+ DCHECK(!int32_register.is(kScratchRegister));
+ movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
+ cmpq(kScratchRegister, int32_register);
+ Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
+ }
}
-void MacroAssembler::AbortIfNotString(Register object) {
- testb(object, Immediate(kSmiTagMask));
- Assert(not_equal, "Operand is not a string");
- push(object);
- movq(object, FieldOperand(object, HeapObject::kMapOffset));
- CmpInstanceType(object, FIRST_NONSTRING_TYPE);
- pop(object);
- Assert(below, "Operand is not a string");
+void MacroAssembler::AssertString(Register object) {
+ if (emit_debug_code()) {
+ testb(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmiAndNotAString);
+ Push(object);
+ movp(object, FieldOperand(object, HeapObject::kMapOffset));
+ CmpInstanceType(object, FIRST_NONSTRING_TYPE);
+ Pop(object);
+ Check(below, kOperandIsNotAString);
+ }
}
-void MacroAssembler::AbortIfNotRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message) {
- ASSERT(!src.is(kScratchRegister));
- LoadRoot(kScratchRegister, root_value_index);
- cmpq(src, kScratchRegister);
- Check(equal, message);
+void MacroAssembler::AssertName(Register object) {
+ if (emit_debug_code()) {
+ testb(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmiAndNotAName);
+ Push(object);
+ movp(object, FieldOperand(object, HeapObject::kMapOffset));
+ CmpInstanceType(object, LAST_NAME_TYPE);
+ Pop(object);
+ Check(below_equal, kOperandIsNotAName);
+ }
+}
+
+
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ Cmp(object, isolate()->factory()->undefined_value());
+ j(equal, &done_checking);
+ Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
+ Assert(equal, kExpectedUndefinedOrCell);
+ bind(&done_checking);
+ }
+}
+
+
+void MacroAssembler::AssertRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ BailoutReason reason) {
+ if (emit_debug_code()) {
+ DCHECK(!src.is(kScratchRegister));
+ LoadRoot(kScratchRegister, root_value_index);
+ cmpp(src, kScratchRegister);
+ Check(equal, reason);
+ }
}
@@ -2893,7 +3682,7 @@
Condition MacroAssembler::IsObjectStringType(Register heap_object,
Register map,
Register instance_type) {
- movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
STATIC_ASSERT(kNotStringTag != 0);
testb(instance_type, Immediate(kIsNotStringMask));
@@ -2901,37 +3690,47 @@
}
+Condition MacroAssembler::IsObjectNameType(Register heap_object,
+ Register map,
+ Register instance_type) {
+ movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+ cmpb(instance_type, Immediate(static_cast<uint8_t>(LAST_NAME_TYPE)));
+ return below_equal;
+}
+
+
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
Label* miss,
bool miss_on_bound_function) {
- // Check that the receiver isn't a smi.
- testl(function, Immediate(kSmiTagMask));
- j(zero, miss);
-
- // Check that the function really is a function.
- CmpObjectType(function, JS_FUNCTION_TYPE, result);
- j(not_equal, miss);
-
+ Label non_instance;
if (miss_on_bound_function) {
- movq(kScratchRegister,
+ // Check that the receiver isn't a smi.
+ testl(function, Immediate(kSmiTagMask));
+ j(zero, miss);
+
+ // Check that the function really is a function.
+ CmpObjectType(function, JS_FUNCTION_TYPE, result);
+ j(not_equal, miss);
+
+ movp(kScratchRegister,
FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
// It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
// field).
- TestBit(FieldOperand(kScratchRegister,
- SharedFunctionInfo::kCompilerHintsOffset),
- SharedFunctionInfo::kBoundFunction);
+ TestBitSharedFunctionInfoSpecialField(kScratchRegister,
+ SharedFunctionInfo::kCompilerHintsOffset,
+ SharedFunctionInfo::kBoundFunction);
j(not_zero, miss);
+
+ // Make sure that the function has an instance prototype.
+ testb(FieldOperand(result, Map::kBitFieldOffset),
+ Immediate(1 << Map::kHasNonInstancePrototype));
+ j(not_zero, &non_instance, Label::kNear);
}
- // Make sure that the function has an instance prototype.
- Label non_instance;
- testb(FieldOperand(result, Map::kBitFieldOffset),
- Immediate(1 << Map::kHasNonInstancePrototype));
- j(not_zero, &non_instance, Label::kNear);
-
// Get the prototype or initial map from the function.
- movq(result,
+ movp(result,
FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
// If the prototype or initial map is the hole, don't return it and
@@ -2946,13 +3745,16 @@
j(not_equal, &done, Label::kNear);
// Get the prototype from the initial map.
- movq(result, FieldOperand(result, Map::kPrototypeOffset));
- jmp(&done, Label::kNear);
+ movp(result, FieldOperand(result, Map::kPrototypeOffset));
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- bind(&non_instance);
- movq(result, FieldOperand(result, Map::kConstructorOffset));
+ if (miss_on_bound_function) {
+ jmp(&done, Label::kNear);
+
+ // Non-instance prototype: Fetch prototype from constructor field
+ // in initial map.
+ bind(&non_instance);
+ movp(result, FieldOperand(result, Map::kConstructorOffset));
+ }
// All done.
bind(&done);
@@ -2968,7 +3770,7 @@
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
- ASSERT(value > 0);
+ DCHECK(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
Operand counter_operand = ExternalOperand(ExternalReference(counter));
if (value == 1) {
@@ -2981,7 +3783,7 @@
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
- ASSERT(value > 0);
+ DCHECK(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
Operand counter_operand = ExternalOperand(ExternalReference(counter));
if (value == 1) {
@@ -2993,39 +3795,22 @@
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
Set(rax, 0); // No arguments.
LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
- CEntryStub ces(1);
- ASSERT(AllowThisStubCall(&ces));
+ CEntryStub ces(isolate(), 1);
+ DCHECK(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
-void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
- // This macro takes the dst register to make the code more readable
- // at the call sites. However, the dst register has to be rcx to
- // follow the calling convention which requires the call type to be
- // in rcx.
- ASSERT(dst.is(rcx));
- if (call_kind == CALL_AS_FUNCTION) {
- LoadSmiConstant(dst, Smi::FromInt(1));
- } else {
- LoadSmiConstant(dst, Smi::FromInt(0));
- }
-}
void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
Label done;
bool definitely_mismatches = false;
@@ -3037,17 +3822,14 @@
&definitely_mismatches,
flag,
Label::kNear,
- call_wrapper,
- call_kind);
+ call_wrapper);
if (!definitely_mismatches) {
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(rcx, call_kind);
call(code);
call_wrapper.AfterCall();
} else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(rcx, call_kind);
+ DCHECK(flag == JUMP_FUNCTION);
jmp(code);
}
bind(&done);
@@ -3055,85 +3837,52 @@
}
-void MacroAssembler::InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- Label done;
- bool definitely_mismatches = false;
- Register dummy = rax;
- InvokePrologue(expected,
- actual,
- code,
- dummy,
- &done,
- &definitely_mismatches,
- flag,
- Label::kNear,
- call_wrapper,
- call_kind);
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(rcx, call_kind);
- Call(code, rmode);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(rcx, call_kind);
- Jump(code, rmode);
- }
- bind(&done);
- }
-}
-
-
void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
- ASSERT(function.is(rdi));
- movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
- movsxlq(rbx,
- FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
+ DCHECK(function.is(rdi));
+ movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
+ LoadSharedFunctionInfoSpecialField(rbx, rdx,
+ SharedFunctionInfo::kFormalParameterCountOffset);
// Advances rdx to the end of the Code object header, to the start of
// the executable code.
- movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
ParameterCount expected(rbx);
- InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
+ InvokeCode(rdx, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Register function,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ // You can't call a function without a valid frame.
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
+
+ DCHECK(function.is(rdi));
+ movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
+ // Advances rdx to the end of the Code object header, to the start of
+ // the executable code.
+ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+
+ InvokeCode(rdx, expected, actual, flag, call_wrapper);
}
void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+ const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- // Get the function and setup the context.
- LoadHeapObject(rdi, function);
- movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- ParameterCount expected(function->shared()->formal_parameter_count());
- InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
+ const CallWrapper& call_wrapper) {
+ Move(rdi, function);
+ InvokeFunction(rdi, expected, actual, flag, call_wrapper);
}
@@ -3145,13 +3894,12 @@
bool* definitely_mismatches,
InvokeFlag flag,
Label::Distance near_jump,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
+ const CallWrapper& call_wrapper) {
bool definitely_matches = false;
*definitely_mismatches = false;
Label invoke;
if (expected.is_immediate()) {
- ASSERT(actual.is_immediate());
+ DCHECK(actual.is_immediate());
if (expected.immediate() == actual.immediate()) {
definitely_matches = true;
} else {
@@ -3173,39 +3921,37 @@
// Expected is in register, actual is immediate. This is the
// case when we invoke function values without going through the
// IC mechanism.
- cmpq(expected.reg(), Immediate(actual.immediate()));
+ cmpp(expected.reg(), Immediate(actual.immediate()));
j(equal, &invoke, Label::kNear);
- ASSERT(expected.reg().is(rbx));
+ DCHECK(expected.reg().is(rbx));
Set(rax, actual.immediate());
} else if (!expected.reg().is(actual.reg())) {
// Both expected and actual are in (different) registers. This
// is the case when we invoke functions using call and apply.
- cmpq(expected.reg(), actual.reg());
+ cmpp(expected.reg(), actual.reg());
j(equal, &invoke, Label::kNear);
- ASSERT(actual.reg().is(rax));
- ASSERT(expected.reg().is(rbx));
+ DCHECK(actual.reg().is(rax));
+ DCHECK(expected.reg().is(rbx));
}
}
if (!definitely_matches) {
Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (!code_constant.is_null()) {
- movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
- addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
+ addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
} else if (!code_register.is(rdx)) {
- movq(rdx, code_register);
+ movp(rdx, code_register);
}
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(adaptor));
- SetCallKind(rcx, call_kind);
Call(adaptor, RelocInfo::CODE_TARGET);
call_wrapper.AfterCall();
if (!*definitely_mismatches) {
jmp(done, near_jump);
}
} else {
- SetCallKind(rcx, call_kind);
Jump(adaptor, RelocInfo::CODE_TARGET);
}
bind(&invoke);
@@ -3213,19 +3959,44 @@
}
+void MacroAssembler::StubPrologue() {
+ pushq(rbp); // Caller's frame pointer.
+ movp(rbp, rsp);
+ Push(rsi); // Callee's context.
+ Push(Smi::FromInt(StackFrame::STUB));
+}
+
+
+void MacroAssembler::Prologue(bool code_pre_aging) {
+ PredictableCodeSizeScope predictable_code_size_scope(this,
+ kNoCodeAgeSequenceLength);
+ if (code_pre_aging) {
+ // Pre-age the code.
+ Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
+ RelocInfo::CODE_AGE_SEQUENCE);
+ Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
+ } else {
+ pushq(rbp); // Caller's frame pointer.
+ movp(rbp, rsp);
+ Push(rsi); // Callee's context.
+ Push(rdi); // Callee's JS function.
+ }
+}
+
+
void MacroAssembler::EnterFrame(StackFrame::Type type) {
- push(rbp);
- movq(rbp, rsp);
- push(rsi); // Context.
+ pushq(rbp);
+ movp(rbp, rsp);
+ Push(rsi); // Context.
Push(Smi::FromInt(type));
- movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
- push(kScratchRegister);
+ Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
+ Push(kScratchRegister);
if (emit_debug_code()) {
- movq(kScratchRegister,
+ Move(kScratchRegister,
isolate()->factory()->undefined_value(),
RelocInfo::EMBEDDED_OBJECT);
- cmpq(Operand(rsp, 0), kScratchRegister);
- Check(not_equal, "code object not properly patched");
+ cmpp(Operand(rsp, 0), kScratchRegister);
+ Check(not_equal, kCodeObjectNotProperlyPatched);
}
}
@@ -3233,32 +4004,33 @@
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
if (emit_debug_code()) {
Move(kScratchRegister, Smi::FromInt(type));
- cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
- Check(equal, "stack frame types must match");
+ cmpp(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
+ Check(equal, kStackFrameTypesMustMatch);
}
- movq(rsp, rbp);
- pop(rbp);
+ movp(rsp, rbp);
+ popq(rbp);
}
void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
// Set up the frame structure on the stack.
// All constants are relative to the frame pointer of the exit frame.
- ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
- ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
- ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
- push(rbp);
- movq(rbp, rsp);
+ DCHECK(ExitFrameConstants::kCallerSPDisplacement ==
+ kFPOnStackSize + kPCOnStackSize);
+ DCHECK(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
+ DCHECK(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
+ pushq(rbp);
+ movp(rbp, rsp);
// Reserve room for entry stack pointer and push the code object.
- ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
- push(Immediate(0)); // Saved entry sp, patched before call.
- movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
- push(kScratchRegister); // Accessed from EditFrame::code_slot.
+ DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
+ Push(Immediate(0)); // Saved entry sp, patched before call.
+ Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
+ Push(kScratchRegister); // Accessed from ExitFrame::code_slot.
// Save the frame pointer and the context in top.
if (save_rax) {
- movq(r14, rax); // Backup rax in callee-save register.
+ movp(r14, rax); // Backup rax in callee-save register.
}
Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
@@ -3274,28 +4046,28 @@
#endif
// Optionally save all XMM registers.
if (save_doubles) {
- int space = XMMRegister::kNumRegisters * kDoubleSize +
- arg_stack_space * kPointerSize;
- subq(rsp, Immediate(space));
+ int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize +
+ arg_stack_space * kRegisterSize;
+ subp(rsp, Immediate(space));
int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
XMMRegister reg = XMMRegister::FromAllocationIndex(i);
movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else if (arg_stack_space > 0) {
- subq(rsp, Immediate(arg_stack_space * kPointerSize));
+ subp(rsp, Immediate(arg_stack_space * kRegisterSize));
}
// Get the required frame alignment for the OS.
- const int kFrameAlignment = OS::ActivationFrameAlignment();
+ const int kFrameAlignment = base::OS::ActivationFrameAlignment();
if (kFrameAlignment > 0) {
- ASSERT(IsPowerOf2(kFrameAlignment));
- ASSERT(is_int8(kFrameAlignment));
- and_(rsp, Immediate(-kFrameAlignment));
+ DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
+ DCHECK(is_int8(kFrameAlignment));
+ andp(rsp, Immediate(-kFrameAlignment));
}
// Patch the saved entry sp.
- movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
+ movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
}
@@ -3305,7 +4077,7 @@
// Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
// so it must be retained across the C-call.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
- lea(r15, Operand(rbp, r14, times_pointer_size, offset));
+ leap(r15, Operand(rbp, r14, times_pointer_size, offset));
EnterExitFrameEpilogue(arg_stack_space, save_doubles);
}
@@ -3322,48 +4094,49 @@
// r15 : argv
if (save_doubles) {
int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
XMMRegister reg = XMMRegister::FromAllocationIndex(i);
movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
}
}
// Get the return address from the stack and restore the frame pointer.
- movq(rcx, Operand(rbp, 1 * kPointerSize));
- movq(rbp, Operand(rbp, 0 * kPointerSize));
+ movp(rcx, Operand(rbp, kFPOnStackSize));
+ movp(rbp, Operand(rbp, 0 * kPointerSize));
// Drop everything up to and including the arguments and the receiver
// from the caller stack.
- lea(rsp, Operand(r15, 1 * kPointerSize));
+ leap(rsp, Operand(r15, 1 * kPointerSize));
- // Push the return address to get ready to return.
- push(rcx);
+ PushReturnAddressFrom(rcx);
- LeaveExitFrameEpilogue();
+ LeaveExitFrameEpilogue(true);
}
-void MacroAssembler::LeaveApiExitFrame() {
- movq(rsp, rbp);
- pop(rbp);
+void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
+ movp(rsp, rbp);
+ popq(rbp);
- LeaveExitFrameEpilogue();
+ LeaveExitFrameEpilogue(restore_context);
}
-void MacroAssembler::LeaveExitFrameEpilogue() {
+void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
// Restore current context from top and clear it in debug mode.
ExternalReference context_address(Isolate::kContextAddress, isolate());
Operand context_operand = ExternalOperand(context_address);
- movq(rsi, context_operand);
+ if (restore_context) {
+ movp(rsi, context_operand);
+ }
#ifdef DEBUG
- movq(context_operand, Immediate(0));
+ movp(context_operand, Immediate(0));
#endif
// Clear the top frame.
ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
isolate());
Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
- movq(c_entry_fp_operand, Immediate(0));
+ movp(c_entry_fp_operand, Immediate(0));
}
@@ -3372,30 +4145,31 @@
Label* miss) {
Label same_contexts;
- ASSERT(!holder_reg.is(scratch));
- ASSERT(!scratch.is(kScratchRegister));
+ DCHECK(!holder_reg.is(scratch));
+ DCHECK(!scratch.is(kScratchRegister));
// Load current lexical context from the stack frame.
- movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
+ movp(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
// When generating debug code, make sure the lexical context is set.
if (emit_debug_code()) {
- cmpq(scratch, Immediate(0));
- Check(not_equal, "we should not have an empty lexical context");
+ cmpp(scratch, Immediate(0));
+ Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
}
- // Load the global context of the current context.
- int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- movq(scratch, FieldOperand(scratch, offset));
- movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
+ // Load the native context of the current context.
+ int offset =
+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
+ movp(scratch, FieldOperand(scratch, offset));
+ movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
- // Check the context is a global context.
+ // Check the context is a native context.
if (emit_debug_code()) {
Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
- isolate()->factory()->global_context_map());
- Check(equal, "JSGlobalObject::global_context should be a global context.");
+ isolate()->factory()->native_context_map());
+ Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
}
// Check if both contexts are the same.
- cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ cmpp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
j(equal, &same_contexts);
// Compare security tokens.
@@ -3403,33 +4177,37 @@
// compatible with the security token in the receiving global
// object.
- // Check the context is a global context.
+ // Check the context is a native context.
if (emit_debug_code()) {
// Preserve original value of holder_reg.
- push(holder_reg);
- movq(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ Push(holder_reg);
+ movp(holder_reg,
+ FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
CompareRoot(holder_reg, Heap::kNullValueRootIndex);
- Check(not_equal, "JSGlobalProxy::context() should not be null.");
+ Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
- // Read the first word and compare to global_context_map(),
- movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
- CompareRoot(holder_reg, Heap::kGlobalContextMapRootIndex);
- Check(equal, "JSGlobalObject::global_context should be a global context.");
- pop(holder_reg);
+ // Read the first word and compare to native_context_map().
+ movp(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
+ CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
+ Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
+ Pop(holder_reg);
}
- movq(kScratchRegister,
- FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ movp(kScratchRegister,
+ FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
int token_offset =
Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
- movq(scratch, FieldOperand(scratch, token_offset));
- cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
+ movp(scratch, FieldOperand(scratch, token_offset));
+ cmpp(scratch, FieldOperand(kScratchRegister, token_offset));
j(not_equal, miss);
bind(&same_contexts);
}
+// Compute the hash code from the untagged key. This must be kept in sync with
+// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
+// code-stubs-hydrogen.cc
void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
// First of all we assign the hash seed to scratch.
LoadRoot(scratch, Heap::kHashSeedRootIndex);
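// For reference, the C++ twin this code must stay in sync with is sketched
// below (reproduced as an approximation; utils.h is authoritative):
//
//   uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
//     uint32_t hash = key ^ seed;
//     hash = ~hash + (hash << 15);
//     hash = hash ^ (hash >> 12);
//     hash = hash + (hash << 2);
//     hash = hash ^ (hash >> 4);
//     hash = hash * 2057;
//     hash = hash ^ (hash >> 16);
//     return hash & 0x3fffffff;
//   }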
@@ -3504,26 +4282,25 @@
decl(r1);
// Generate an unrolled loop that performs a few probes before giving up.
- const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
+ for (int i = 0; i < kNumberDictionaryProbes; i++) {
// Use r2 for index calculations and keep the hash intact in r0.
- movq(r2, r0);
+ movp(r2, r0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
}
- and_(r2, r1);
+ andp(r2, r1);
// Scale the index by multiplying by the entry size.
- ASSERT(SeededNumberDictionary::kEntrySize == 3);
- lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
+ DCHECK(SeededNumberDictionary::kEntrySize == 3);
+ leap(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
// Check if the key matches.
- cmpq(key, FieldOperand(elements,
+ cmpp(key, FieldOperand(elements,
r2,
times_pointer_size,
SeededNumberDictionary::kElementsStartOffset));
- if (i != (kProbes - 1)) {
+ if (i != (kNumberDictionaryProbes - 1)) {
j(equal, &done);
} else {
j(not_equal, miss);
@@ -3534,7 +4311,7 @@
// Check that the value is a normal property.
const int kDetailsOffset =
SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- ASSERT_EQ(NORMAL, 0);
+ DCHECK_EQ(NORMAL, 0);
Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
Smi::FromInt(PropertyDetails::TypeField::kMask));
j(not_zero, miss);
@@ -3542,25 +4319,25 @@
// Get the value at the masked, scaled index.
const int kValueOffset =
SeededNumberDictionary::kElementsStartOffset + kPointerSize;
- movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
+ movp(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
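// The unrolled loop above is open addressing with quadratic probing over a
// power-of-two capacity; each entry occupies kEntrySize == 3 pointer-sized
// slots (key, value, details). A pseudo-C++ sketch of the lookup:
//
//   for (int i = 0; i < kNumberDictionaryProbes; i++) {
//     uint32_t index = (hash + GetProbeOffset(i)) & mask;  // masked probe
//     uint32_t slot = index * 3;            // the lea computes r2 + r2*2
//     if (KeyAt(slot) == smi_key) goto found;
//   }
//   goto miss;  // give up after kNumberDictionaryProbes tries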
void MacroAssembler::LoadAllocationTopHelper(Register result,
Register scratch,
AllocationFlags flags) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
// Just return if allocation top is already known.
if ((flags & RESULT_CONTAINS_TOP) != 0) {
// No use of scratch if allocation top is provided.
- ASSERT(!scratch.is_valid());
+ DCHECK(!scratch.is_valid());
#ifdef DEBUG
// Assert that result actually contains top on entry.
- Operand top_operand = ExternalOperand(new_space_allocation_top);
- cmpq(result, top_operand);
- Check(equal, "Unexpected allocation top");
+ Operand top_operand = ExternalOperand(allocation_top);
+ cmpp(result, top_operand);
+ Check(equal, kUnexpectedAllocationTop);
#endif
return;
}
@@ -3568,40 +4345,78 @@
// Move address of new object to result. Use scratch register if available,
// and keep address in scratch until call to UpdateAllocationTopHelper.
if (scratch.is_valid()) {
- LoadAddress(scratch, new_space_allocation_top);
- movq(result, Operand(scratch, 0));
+ LoadAddress(scratch, allocation_top);
+ movp(result, Operand(scratch, 0));
} else {
- Load(result, new_space_allocation_top);
+ Load(result, allocation_top);
+ }
+}
+
+
+void MacroAssembler::MakeSureDoubleAlignedHelper(Register result,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
+ if (kPointerSize == kDoubleSize) {
+ if (FLAG_debug_code) {
+ testl(result, Immediate(kDoubleAlignmentMask));
+ Check(zero, kAllocationIsNotDoubleAligned);
+ }
+ } else {
+ // Align the next allocation. Storing the filler map without checking top
+ // is safe in new-space because the limit of the heap is aligned there.
+ DCHECK(kPointerSize * 2 == kDoubleSize);
+ DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+ DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+ // Make sure scratch is not clobbered by this function as it might be
+ // used in UpdateAllocationTopHelper later.
+ DCHECK(!scratch.is(kScratchRegister));
+ Label aligned;
+ testl(result, Immediate(kDoubleAlignmentMask));
+ j(zero, &aligned, Label::kNear);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+ cmpp(result, ExternalOperand(allocation_limit));
+ j(above_equal, gc_required);
+ }
+ LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex);
+ movp(Operand(result, 0), kScratchRegister);
+ addp(result, Immediate(kDoubleSize / 2));
+ bind(&aligned);
}
}
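// On targets where kPointerSize * 2 == kDoubleSize, a misaligned top is
// fixed by burning one pointer-sized slot with the one-pointer filler map,
// which keeps the heap iterable. Sketch:
//
//   if ((top & kDoubleAlignmentMask) != 0) {
//     *reinterpret_cast<Map**>(top) = one_pointer_filler_map;
//     top += kPointerSize;  // == kDoubleSize / 2
//   }
//
// The extra limit check for PRETENURE_OLD_DATA_SPACE guards the filler
// store, since only new space guarantees an aligned allocation limit.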
void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
- Register scratch) {
+ Register scratch,
+ AllocationFlags flags) {
if (emit_debug_code()) {
- testq(result_end, Immediate(kObjectAlignmentMask));
- Check(zero, "Unaligned allocation in new space");
+ testp(result_end, Immediate(kObjectAlignmentMask));
+ Check(zero, kUnalignedAllocationInNewSpace);
}
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
// Update new top.
if (scratch.is_valid()) {
// Scratch already contains address of allocation top.
- movq(Operand(scratch, 0), result_end);
+ movp(Operand(scratch, 0), result_end);
} else {
- Store(new_space_allocation_top, result_end);
+ Store(allocation_top, result_end);
}
}
-void MacroAssembler::AllocateInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
+ DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+ DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -3616,97 +4431,69 @@
jmp(gc_required);
return;
}
- ASSERT(!result.is(result_end));
+ DCHECK(!result.is(result_end));
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
+ }
+
// Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
Register top_reg = result_end.is_valid() ? result_end : result;
if (!top_reg.is(result)) {
- movq(top_reg, result);
+ movp(top_reg, result);
}
- addq(top_reg, Immediate(object_size));
+ addp(top_reg, Immediate(object_size));
j(carry, gc_required);
- Operand limit_operand = ExternalOperand(new_space_allocation_limit);
- cmpq(top_reg, limit_operand);
+ Operand limit_operand = ExternalOperand(allocation_limit);
+ cmpp(top_reg, limit_operand);
j(above, gc_required);
// Update allocation top.
- UpdateAllocationTopHelper(top_reg, scratch);
+ UpdateAllocationTopHelper(top_reg, scratch, flags);
+ bool tag_result = (flags & TAG_OBJECT) != 0;
if (top_reg.is(result)) {
- if ((flags & TAG_OBJECT) != 0) {
- subq(result, Immediate(object_size - kHeapObjectTag));
+ if (tag_result) {
+ subp(result, Immediate(object_size - kHeapObjectTag));
} else {
- subq(result, Immediate(object_size));
+ subp(result, Immediate(object_size));
}
- } else if ((flags & TAG_OBJECT) != 0) {
+ } else if (tag_result) {
// Tag the result if requested.
- addq(result, Immediate(kHeapObjectTag));
+ DCHECK(kHeapObjectTag == 1);
+ incp(result);
}
}
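// Allocate() is bump-pointer allocation against a limit; the shape, in
// pseudo-C++ (names illustrative):
//
//   Address top = *allocation_top;                 // LoadAllocationTopHelper
//   Address new_top = top + object_size;
//   if (overflowed || new_top > *allocation_limit) goto gc_required;
//   *allocation_top = new_top;                     // UpdateAllocationTopHelper
//   result = top + kHeapObjectTag;                 // if TAG_OBJECT
//
// When no result_end register is available, one register carries both top
// and new_top, which is why the tail subtracts object_size back out.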
-void MacroAssembler::AllocateInNewSpace(int header_size,
- ScaleFactor element_size,
- Register element_count,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- movl(result, Immediate(0x7091));
- movl(result_end, Immediate(0x7191));
- if (scratch.is_valid()) {
- movl(scratch, Immediate(0x7291));
- }
- // Register element_count is not modified by the function.
- }
- jmp(gc_required);
- return;
- }
- ASSERT(!result.is(result_end));
-
- // Load address of new object into result.
- LoadAllocationTopHelper(result, scratch, flags);
-
- // Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- // We assume that element_count*element_size + header_size does not
- // overflow.
- lea(result_end, Operand(element_count, element_size, header_size));
- addq(result_end, result);
- j(carry, gc_required);
- Operand limit_operand = ExternalOperand(new_space_allocation_limit);
- cmpq(result_end, limit_operand);
- j(above, gc_required);
-
- // Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
-
- // Tag the result if requested.
- if ((flags & TAG_OBJECT) != 0) {
- addq(result, Immediate(kHeapObjectTag));
- }
+void MacroAssembler::Allocate(int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
+ DCHECK((flags & SIZE_IN_WORDS) == 0);
+ leap(result_end, Operand(element_count, element_size, header_size));
+ Allocate(result_end, result, result_end, scratch, gc_required, flags);
}
-void MacroAssembler::AllocateInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
+ DCHECK((flags & SIZE_IN_WORDS) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -3720,29 +4507,33 @@
jmp(gc_required);
return;
}
- ASSERT(!result.is(result_end));
+ DCHECK(!result.is(result_end));
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
- // Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
- if (!object_size.is(result_end)) {
- movq(result_end, object_size);
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
}
- addq(result_end, result);
+
+ // Calculate new top and bail out if new space is exhausted.
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+ if (!object_size.is(result_end)) {
+ movp(result_end, object_size);
+ }
+ addp(result_end, result);
j(carry, gc_required);
- Operand limit_operand = ExternalOperand(new_space_allocation_limit);
- cmpq(result_end, limit_operand);
+ Operand limit_operand = ExternalOperand(allocation_limit);
+ cmpp(result_end, limit_operand);
j(above, gc_required);
// Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
+ UpdateAllocationTopHelper(result_end, scratch, flags);
// Tag the result if requested.
if ((flags & TAG_OBJECT) != 0) {
- addq(result, Immediate(kHeapObjectTag));
+ addp(result, Immediate(kHeapObjectTag));
}
}
@@ -3752,30 +4543,30 @@
ExternalReference::new_space_allocation_top_address(isolate());
// Make sure the object has no tag before resetting top.
- and_(object, Immediate(~kHeapObjectTagMask));
+ andp(object, Immediate(~kHeapObjectTagMask));
Operand top_operand = ExternalOperand(new_space_allocation_top);
#ifdef DEBUG
- cmpq(object, top_operand);
- Check(below, "Undo allocation of non allocated memory");
+ cmpp(object, top_operand);
+ Check(below, kUndoAllocationOfNonAllocatedMemory);
#endif
- movq(top_operand, object);
+ movp(top_operand, object);
}
void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch,
- Label* gc_required) {
+ Label* gc_required,
+ MutableMode mode) {
// Allocate heap number in new space.
- AllocateInNewSpace(HeapNumber::kSize,
- result,
- scratch,
- no_reg,
- gc_required,
- TAG_OBJECT);
+ Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
+
+ Heap::RootListIndex map_index = mode == MUTABLE
+ ? Heap::kMutableHeapNumberMapRootIndex
+ : Heap::kHeapNumberMapRootIndex;
// Set the map.
- LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ LoadRoot(kScratchRegister, map_index);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
@@ -3789,69 +4580,67 @@
// observing object alignment.
const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
kObjectAlignmentMask;
- ASSERT(kShortSize == 2);
+ DCHECK(kShortSize == 2);
// scratch1 = length * 2 + kObjectAlignmentMask.
- lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
+ leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
kHeaderAlignment));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
+ andp(scratch1, Immediate(~kObjectAlignmentMask));
if (kHeaderAlignment > 0) {
- subq(scratch1, Immediate(kHeaderAlignment));
+ subp(scratch1, Immediate(kHeaderAlignment));
}
// Allocate two-byte string in new space.
- AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
- times_1,
- scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(SeqTwoByteString::kHeaderSize,
+ times_1,
+ scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
// Set the map, length and hash field.
LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
Integer32ToSmi(scratch1, length);
- movq(FieldOperand(result, String::kLengthOffset), scratch1);
- movq(FieldOperand(result, String::kHashFieldOffset),
+ movp(FieldOperand(result, String::kLengthOffset), scratch1);
+ movp(FieldOperand(result, String::kHashFieldOffset),
Immediate(String::kEmptyHashField));
}
-void MacroAssembler::AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
- const int kHeaderAlignment = SeqAsciiString::kHeaderSize &
+ const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
kObjectAlignmentMask;
movl(scratch1, length);
- ASSERT(kCharSize == 1);
- addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
+ DCHECK(kCharSize == 1);
+ addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
+ andp(scratch1, Immediate(~kObjectAlignmentMask));
if (kHeaderAlignment > 0) {
- subq(scratch1, Immediate(kHeaderAlignment));
+ subp(scratch1, Immediate(kHeaderAlignment));
}
- // Allocate ASCII string in new space.
- AllocateInNewSpace(SeqAsciiString::kHeaderSize,
- times_1,
- scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ // Allocate one-byte string in new space.
+ Allocate(SeqOneByteString::kHeaderSize,
+ times_1,
+ scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
// Set the map, length and hash field.
- LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ LoadRoot(kScratchRegister, Heap::kOneByteStringMapRootIndex);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
Integer32ToSmi(scratch1, length);
- movq(FieldOperand(result, String::kLengthOffset), scratch1);
- movq(FieldOperand(result, String::kHashFieldOffset),
+ movp(FieldOperand(result, String::kLengthOffset), scratch1);
+ movp(FieldOperand(result, String::kHashFieldOffset),
Immediate(String::kEmptyHashField));
}
@@ -3861,34 +4650,29 @@
Register scratch2,
Label* gc_required) {
// Allocate cons string in new space.
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
-void MacroAssembler::AllocateAsciiConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+void MacroAssembler::AllocateOneByteConsString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
- LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ LoadRoot(kScratchRegister, Heap::kConsOneByteStringMapRootIndex);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
@@ -3897,34 +4681,26 @@
Register scratch2,
Label* gc_required) {
// Allocate sliced string in new space.
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
-void MacroAssembler::AllocateAsciiSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
+void MacroAssembler::AllocateOneByteSlicedString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
// Allocate sliced string in new space.
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
- LoadRoot(kScratchRegister, Heap::kSlicedAsciiStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ LoadRoot(kScratchRegister, Heap::kSlicedOneByteStringMapRootIndex);
+ movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
@@ -3940,55 +4716,77 @@
Register length,
int min_length,
Register scratch) {
- ASSERT(min_length >= 0);
- if (FLAG_debug_code) {
+ DCHECK(min_length >= 0);
+ if (emit_debug_code()) {
cmpl(length, Immediate(min_length));
- Assert(greater_equal, "Invalid min_length");
+ Assert(greater_equal, kInvalidMinLength);
}
- Label loop, done, short_string, short_loop;
+ Label short_loop, len8, len16, len24, done, short_string;
- const int kLongStringLimit = 20;
+ const int kLongStringLimit = 4 * kPointerSize;
if (min_length <= kLongStringLimit) {
- cmpl(length, Immediate(kLongStringLimit));
- j(less_equal, &short_string);
+ cmpl(length, Immediate(kPointerSize));
+ j(below, &short_string, Label::kNear);
}
- ASSERT(source.is(rsi));
- ASSERT(destination.is(rdi));
- ASSERT(length.is(rcx));
+ DCHECK(source.is(rsi));
+ DCHECK(destination.is(rdi));
+ DCHECK(length.is(rcx));
+
+ if (min_length <= kLongStringLimit) {
+ cmpl(length, Immediate(2 * kPointerSize));
+ j(below_equal, &len8, Label::kNear);
+ cmpl(length, Immediate(3 * kPointerSize));
+ j(below_equal, &len16, Label::kNear);
+ cmpl(length, Immediate(4 * kPointerSize));
+ j(below_equal, &len24, Label::kNear);
+ }
// Because source is 8-byte aligned in our uses of this function,
// we keep source aligned for the rep movs operation by copying the odd bytes
// at the end of the ranges.
- movq(scratch, length);
- shrl(length, Immediate(3));
- repmovsq();
+ movp(scratch, length);
+ shrl(length, Immediate(kPointerSizeLog2));
+ repmovsp();
// Move remaining bytes of length.
- andl(scratch, Immediate(0x7));
- movq(length, Operand(source, scratch, times_1, -8));
- movq(Operand(destination, scratch, times_1, -8), length);
- addq(destination, scratch);
+ andl(scratch, Immediate(kPointerSize - 1));
+ movp(length, Operand(source, scratch, times_1, -kPointerSize));
+ movp(Operand(destination, scratch, times_1, -kPointerSize), length);
+ addp(destination, scratch);
if (min_length <= kLongStringLimit) {
- jmp(&done);
+ jmp(&done, Label::kNear);
+ bind(&len24);
+ movp(scratch, Operand(source, 2 * kPointerSize));
+ movp(Operand(destination, 2 * kPointerSize), scratch);
+ bind(&len16);
+ movp(scratch, Operand(source, kPointerSize));
+ movp(Operand(destination, kPointerSize), scratch);
+ bind(&len8);
+ movp(scratch, Operand(source, 0));
+ movp(Operand(destination, 0), scratch);
+ // Move remaining bytes of length.
+ movp(scratch, Operand(source, length, times_1, -kPointerSize));
+ movp(Operand(destination, length, times_1, -kPointerSize), scratch);
+ addp(destination, length);
+ jmp(&done, Label::kNear);
bind(&short_string);
if (min_length == 0) {
testl(length, length);
- j(zero, &done);
+ j(zero, &done, Label::kNear);
}
- lea(scratch, Operand(destination, length, times_1, 0));
bind(&short_loop);
- movb(length, Operand(source, 0));
- movb(Operand(destination, 0), length);
- incq(source);
- incq(destination);
- cmpq(destination, scratch);
- j(not_equal, &short_loop);
-
- bind(&done);
+ movb(scratch, Operand(source, 0));
+ movb(Operand(destination, 0), scratch);
+ incp(source);
+ incp(destination);
+ decl(length);
+ j(not_zero, &short_loop);
}
+
+ bind(&done);
}
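// Copy strategy: fewer than kPointerSize bytes take the byte loop; up to
// four words are copied with straight-line word moves; anything longer uses
// rep movs on whole words. Tails are handled with a single overlapping
// word-sized move instead of a loop, roughly:
//
//   memcpy(dst, src, (len / kPointerSize) * kPointerSize);  // rep movs
//   memcpy(dst + len - kPointerSize,                        // overlapping
//          src + len - kPointerSize, kPointerSize);         // tail store
//
// (valid because len >= kPointerSize on this path and the source and
// destination ranges given to this helper do not overlap).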
@@ -3998,10 +4796,10 @@
Label loop, entry;
jmp(&entry);
bind(&loop);
- movq(Operand(start_offset, 0), filler);
- addq(start_offset, Immediate(kPointerSize));
+ movp(Operand(start_offset, 0), filler);
+ addp(start_offset, Immediate(kPointerSize));
bind(&entry);
- cmpq(start_offset, end_offset);
+ cmpp(start_offset, end_offset);
j(less, &loop);
}
@@ -4009,15 +4807,15 @@
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
- movq(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ movp(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
for (int i = 1; i < context_chain_length; i++) {
- movq(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ movp(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
} else {
// Slot is in the current function context. Move it into the
// destination register in case we store into it (the write barrier
// cannot be allowed to destroy the context in rsi).
- movq(dst, rsi);
+ movp(dst, rsi);
}
// We should not have found a with context by walking the context
@@ -4027,7 +4825,7 @@
if (emit_debug_code()) {
CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
Heap::kWithContextMapRootIndex);
- Check(not_equal, "Variable resolved to with context.");
+ Check(not_equal, kVariableResolvedToWithContext);
}
}
@@ -4039,38 +4837,26 @@
Register scratch,
Label* no_map_match) {
// Load the global or builtins object from the current context.
- movq(scratch, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
+ movp(scratch,
+ Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
// Check that the function's map is the same as the expected cached map.
- int expected_index =
- Context::GetContextMapIndexFromElementsKind(expected_kind);
- cmpq(map_in_out, Operand(scratch, Context::SlotOffset(expected_index)));
+ movp(scratch, Operand(scratch,
+ Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
+
+ int offset = expected_kind * kPointerSize +
+ FixedArrayBase::kHeaderSize;
+ cmpp(map_in_out, FieldOperand(scratch, offset));
j(not_equal, no_map_match);
// Use the transitioned cached map.
- int trans_index =
- Context::GetContextMapIndexFromElementsKind(transitioned_kind);
- movq(map_in_out, Operand(scratch, Context::SlotOffset(trans_index)));
+ offset = transitioned_kind * kPointerSize +
+ FixedArrayBase::kHeaderSize;
+ movp(map_in_out, FieldOperand(scratch, offset));
}
-void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch, Register map_out) {
- ASSERT(!function_in.is(map_out));
- Label done;
- movq(map_out, FieldOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
- if (!FLAG_smi_only_arrays) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
- FAST_ELEMENTS,
- map_out,
- scratch,
- &done);
- }
- bind(&done);
-}
-
#ifdef _WIN64
static const int kRegisterPassedArguments = 4;
#else
@@ -4079,24 +4865,25 @@
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
- movq(function, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
- movq(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
- movq(function, Operand(function, Context::SlotOffset(index)));
+ movp(function,
+ Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the native context from the global or builtins object.
+ movp(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
+ // Load the function from the native context.
+ movp(function, Operand(function, Context::SlotOffset(index)));
}
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map) {
// Load the initial map. The global functions all have initial maps.
- movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ movp(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
if (emit_debug_code()) {
Label ok, fail;
CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
jmp(&ok);
bind(&fail);
- Abort("Global functions must have initial map");
+ Abort(kGlobalFunctionsMustHaveInitialMap);
bind(&ok);
}
}
@@ -4109,7 +4896,7 @@
// arguments.
// On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
// and the caller does not reserve stack slots for them.
- ASSERT(num_arguments >= 0);
+ DCHECK(num_arguments >= 0);
#ifdef _WIN64
const int kMinimumStackSlots = kRegisterPassedArguments;
if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
@@ -4121,19 +4908,52 @@
}
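+// Illustration (hypothetical counts): with num_arguments == 3, Win64 still
+// reserves kRegisterPassedArguments == 4 shadow slots for the callee, while
+// the AMD64 ABI reserves none because all three arguments fit in registers;
+// with num_arguments == 8, both ABIs need stack slots for the spill-over.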
+void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
+ Register index,
+ Register value,
+ uint32_t encoding_mask) {
+ Label is_object;
+ JumpIfNotSmi(string, &is_object);
+ Abort(kNonObject);
+ bind(&is_object);
+
+ Push(value);
+ movp(value, FieldOperand(string, HeapObject::kMapOffset));
+ movzxbp(value, FieldOperand(value, Map::kInstanceTypeOffset));
+
+ andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ cmpp(value, Immediate(encoding_mask));
+ Pop(value);
+ Check(equal, kUnexpectedStringType);
+
+ // The index is assumed to be untagged coming in; tag it to compare with the
+ // string length without using a temp register. It is restored at the end of
+ // this function.
+ Integer32ToSmi(index, index);
+ SmiCompare(index, FieldOperand(string, String::kLengthOffset));
+ Check(less, kIndexIsTooLarge);
+
+ SmiCompare(index, Smi::FromInt(0));
+ Check(greater_equal, kIndexIsNegative);
+
+ // Restore the untagged index.
+ SmiToInteger32(index, index);
+}
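+// Usage sketch (hypothetical caller, not part of this patch): a store into a
+// sequential one-byte string could be guarded with an encoding mask such as
+//
+//   masm->EmitSeqStringSetCharCheck(string, index, value,
+//                                   kSeqStringTag | kOneByteStringTag);
+//
+// The debug check fails unless the instance type, masked with
+// kStringRepresentationMask | kStringEncodingMask, equals encoding_mask.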
+
+
void MacroAssembler::PrepareCallCFunction(int num_arguments) {
- int frame_alignment = OS::ActivationFrameAlignment();
- ASSERT(frame_alignment != 0);
- ASSERT(num_arguments >= 0);
+ int frame_alignment = base::OS::ActivationFrameAlignment();
+ DCHECK(frame_alignment != 0);
+ DCHECK(num_arguments >= 0);
// Make stack end at alignment and allocate space for arguments and old rsp.
- movq(kScratchRegister, rsp);
- ASSERT(IsPowerOf2(frame_alignment));
+ movp(kScratchRegister, rsp);
+ DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
- subq(rsp, Immediate((argument_slots_on_stack + 1) * kPointerSize));
- and_(rsp, Immediate(-frame_alignment));
- movq(Operand(rsp, argument_slots_on_stack * kPointerSize), kScratchRegister);
+ subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
+ andp(rsp, Immediate(-frame_alignment));
+ movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
}
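+// Worked example (illustrative, assuming a 16-byte frame alignment and the
+// AMD64 ABI): for num_arguments == 8, two arguments spill to the stack, so
+// rsp is lowered by (2 + 1) * kRegisterSize and then aligned down to a
+// multiple of 16; the previous rsp is saved in the extra slot at
+// Operand(rsp, 2 * kRegisterSize), from which CallCFunction restores it.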
@@ -4145,50 +4965,68 @@
void MacroAssembler::CallCFunction(Register function, int num_arguments) {
- ASSERT(has_frame());
+ DCHECK(has_frame());
// Check stack alignment.
if (emit_debug_code()) {
CheckStackAlignment();
}
call(function);
- ASSERT(OS::ActivationFrameAlignment() != 0);
- ASSERT(num_arguments >= 0);
+ DCHECK(base::OS::ActivationFrameAlignment() != 0);
+ DCHECK(num_arguments >= 0);
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
- movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
+ movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
}
-bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
- if (r1.is(r2)) return true;
- if (r1.is(r3)) return true;
- if (r1.is(r4)) return true;
- if (r2.is(r3)) return true;
- if (r2.is(r4)) return true;
- if (r3.is(r4)) return true;
- return false;
+#ifdef DEBUG
+bool AreAliased(Register reg1,
+ Register reg2,
+ Register reg3,
+ Register reg4,
+ Register reg5,
+ Register reg6,
+ Register reg7,
+ Register reg8) {
+ int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
+ reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
+ reg7.is_valid() + reg8.is_valid();
+
+ RegList regs = 0;
+ if (reg1.is_valid()) regs |= reg1.bit();
+ if (reg2.is_valid()) regs |= reg2.bit();
+ if (reg3.is_valid()) regs |= reg3.bit();
+ if (reg4.is_valid()) regs |= reg4.bit();
+ if (reg5.is_valid()) regs |= reg5.bit();
+ if (reg6.is_valid()) regs |= reg6.bit();
+ if (reg7.is_valid()) regs |= reg7.bit();
+ if (reg8.is_valid()) regs |= reg8.bit();
+ int n_of_non_aliasing_regs = NumRegs(regs);
+
+ return n_of_valid_regs != n_of_non_aliasing_regs;
}
+#endif
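+// Illustration (hypothetical call, assuming the unused parameters default to
+// no_reg as in the declaration): AreAliased(rax, rbx, rax) counts three valid
+// registers but only two distinct bits in the RegList, so it returns true;
+// AreAliased(rax, rbx, rcx) counts three of each and returns false.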
CodePatcher::CodePatcher(byte* address, int size)
: address_(address),
size_(size),
- masm_(Isolate::Current(), address, size + Assembler::kGap) {
+ masm_(NULL, address, size + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+ DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
- CPU::FlushICache(address_, size_);
+ CpuFeatures::FlushICache(address_, size_);
// Check that the code was patched as expected.
- ASSERT(masm_.pc_ == address_ + size_);
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+ DCHECK(masm_.pc_ == address_ + size_);
+ DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
@@ -4199,12 +5037,12 @@
Condition cc,
Label* condition_met,
Label::Distance condition_met_distance) {
- ASSERT(cc == zero || cc == not_zero);
+ DCHECK(cc == zero || cc == not_zero);
if (scratch.is(object)) {
- and_(scratch, Immediate(~Page::kPageAlignmentMask));
+ andp(scratch, Immediate(~Page::kPageAlignmentMask));
} else {
- movq(scratch, Immediate(~Page::kPageAlignmentMask));
- and_(scratch, object);
+ movp(scratch, Immediate(~Page::kPageAlignmentMask));
+ andp(scratch, object);
}
if (mask < (1 << kBitsPerByte)) {
testb(Operand(scratch, MemoryChunk::kFlagsOffset),
@@ -4216,24 +5054,36 @@
}
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated) {
+ if (map->CanBeDeprecated()) {
+ Move(scratch, map);
+ movl(scratch, FieldOperand(scratch, Map::kBitField3Offset));
+ andl(scratch, Immediate(Map::Deprecated::kMask));
+ j(not_zero, if_deprecated);
+ }
+}
+
+
void MacroAssembler::JumpIfBlack(Register object,
Register bitmap_scratch,
Register mask_scratch,
Label* on_black,
Label::Distance on_black_distance) {
- ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
+ DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
GetMarkBits(object, bitmap_scratch, mask_scratch);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
// The mask_scratch register contains a 1 at the position of the first bit
// and a 0 at all other positions, including the position of the second bit.
- movq(rcx, mask_scratch);
+ movp(rcx, mask_scratch);
// Make rcx into a mask that covers both marking bits using the operation
// rcx = mask | (mask << 1).
- lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
+ leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
// Note that we are using a 4-byte aligned 8-byte load.
- and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- cmpq(mask_scratch, rcx);
+ andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ cmpp(mask_scratch, rcx);
j(equal, on_black, on_black_distance);
}
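+// Mask-trick recap (illustrative value): if mask_scratch is 0b0010, the leap
+// computes rcx = mask + mask * 2 = 0b0110, covering both mark bits of the
+// pair. After and-ing rcx with the bitmap cell, equality with the original
+// mask means exactly the first bit is set, i.e. the black pattern "10".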
@@ -4247,11 +5097,11 @@
Label* not_data_object,
Label::Distance not_data_object_distance) {
Label is_data_object;
- movq(scratch, FieldOperand(value, HeapObject::kMapOffset));
+ movp(scratch, FieldOperand(value, HeapObject::kMapOffset));
CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
j(equal, &is_data_object, Label::kNear);
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
// If it's a string and it's not a cons string then it's an object containing
// no GC pointers.
testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
@@ -4264,24 +5114,24 @@
void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg) {
- ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
- movq(bitmap_reg, addr_reg);
+ DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
+ movp(bitmap_reg, addr_reg);
// Sign-extended 32-bit immediate.
- and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
- movq(rcx, addr_reg);
+ andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
+ movp(rcx, addr_reg);
int shift =
Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
shrl(rcx, Immediate(shift));
- and_(rcx,
+ andp(rcx,
Immediate((Page::kPageAlignmentMask >> shift) &
~(Bitmap::kBytesPerCell - 1)));
- addq(bitmap_reg, rcx);
- movq(rcx, addr_reg);
+ addp(bitmap_reg, rcx);
+ movp(rcx, addr_reg);
shrl(rcx, Immediate(kPointerSizeLog2));
- and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
+ andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
movl(mask_reg, Immediate(1));
- shl_cl(mask_reg);
+ shlp_cl(mask_reg);
}
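+// Address-arithmetic sketch (recap, nothing new): for an address A,
+// bitmap_reg becomes the page base of A; the shrl/andp pair turns the offset
+// within the page into the byte offset of A's mark-bit cell, rounded down to
+// Bitmap::kBytesPerCell; mask_reg becomes
+// 1 << ((A >> kPointerSizeLog2) & ((1 << Bitmap::kBitsPerCellLog2) - 1)),
+// the bit for A's word within that cell.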
@@ -4291,33 +5141,33 @@
Register mask_scratch,
Label* value_is_white_and_not_data,
Label::Distance distance) {
- ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
+ DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
- ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
- ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+ DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
Label done;
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
- testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
j(not_zero, &done, Label::kNear);
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Check for impossible bit pattern.
Label ok;
- push(mask_scratch);
+ Push(mask_scratch);
+ // Shift left; may overflow, making the check conservative.
- addq(mask_scratch, mask_scratch);
- testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ addp(mask_scratch, mask_scratch);
+ testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
j(zero, &ok, Label::kNear);
int3();
bind(&ok);
- pop(mask_scratch);
+ Pop(mask_scratch);
}
// Value is white. We check whether it is data that doesn't need scanning.
@@ -4328,16 +5178,16 @@
Label is_data_object;
// Check for heap-number
- movq(map, FieldOperand(value, HeapObject::kMapOffset));
+ movp(map, FieldOperand(value, HeapObject::kMapOffset));
CompareRoot(map, Heap::kHeapNumberMapRootIndex);
j(not_equal, &not_heap_number, Label::kNear);
- movq(length, Immediate(HeapNumber::kSize));
+ movp(length, Immediate(HeapNumber::kSize));
jmp(&is_data_object, Label::kNear);
bind(&not_heap_number);
// Check for strings.
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
// If it's a string and it's not a cons string then it's an object containing
// no GC pointers.
Register instance_type = rcx;
@@ -4350,31 +5200,31 @@
Label not_external;
// External strings are the only ones with the kExternalStringTag bit
// set.
- ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
- ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
+ DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
testb(instance_type, Immediate(kExternalStringTag));
j(zero, &not_external, Label::kNear);
- movq(length, Immediate(ExternalString::kSize));
+ movp(length, Immediate(ExternalString::kSize));
jmp(&is_data_object, Label::kNear);
bind(&not_external);
- // Sequential string, either ASCII or UC16.
- ASSERT(kAsciiStringTag == 0x04);
- and_(length, Immediate(kStringEncodingMask));
- xor_(length, Immediate(kStringEncodingMask));
- addq(length, Immediate(0x04));
- // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
- imul(length, FieldOperand(value, String::kLengthOffset));
- shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
- addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
- and_(length, Immediate(~kObjectAlignmentMask));
+ // Sequential string, either Latin1 or UC16.
+ DCHECK(kOneByteStringTag == 0x04);
+ andp(length, Immediate(kStringEncodingMask));
+ xorp(length, Immediate(kStringEncodingMask));
+ addp(length, Immediate(0x04));
+ // Value now either 4 (if Latin1) or 8 (if UC16), i.e. char-size shifted by 2.
+ imulp(length, FieldOperand(value, String::kLengthOffset));
+ shrp(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
+ addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
+ andp(length, Immediate(~kObjectAlignmentMask));
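+ // Size-computation recap (illustrative): for a one-byte string the
+ // andp/xorp/addp sequence yields 4 (char size 1 shifted by 2) and for a
+ // two-byte string 8; multiplying by the smi-tagged length and shifting
+ // right by 2 + kSmiTagSize + kSmiShiftSize leaves the payload size in
+ // bytes, which the final addp/andp rounds up to object alignment.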
bind(&is_data_object);
// Value is a data object, and it is white. Mark it black. Since we know
// that the object is white we can make it black by flipping one bit.
- or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ orp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
- and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
+ andp(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
bind(&done);
@@ -4382,49 +5232,112 @@
void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
- Label next;
+ Label next, start;
Register empty_fixed_array_value = r8;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
- Register empty_descriptor_array_value = r9;
- LoadRoot(empty_descriptor_array_value,
- Heap::kEmptyDescriptorArrayRootIndex);
- movq(rcx, rax);
+ movp(rcx, rax);
+
+ // Check if the enum length field is properly initialized, indicating that
+ // there is an enum cache.
+ movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
+
+ EnumLength(rdx, rbx);
+ Cmp(rdx, Smi::FromInt(kInvalidEnumCacheSentinel));
+ j(equal, call_runtime);
+
+ jmp(&start);
+
bind(&next);
- // Check that there are no elements. Register rcx contains the
- // current JS object we've reached through the prototype chain.
- cmpq(empty_fixed_array_value,
- FieldOperand(rcx, JSObject::kElementsOffset));
- j(not_equal, call_runtime);
-
- // Check that instance descriptors are not empty so that we can
- // check for an enum cache. Leave the map in rbx for the subsequent
- // prototype load.
- movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
- movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOrBitField3Offset));
- JumpIfSmi(rdx, call_runtime);
-
- // Check that there is an enum cache in the non-empty instance
- // descriptors (rdx). This is the case if the next enumeration
- // index field does not contain a smi.
- movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
- JumpIfSmi(rdx, call_runtime);
+ movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
// For all objects but the receiver, check that the cache is empty.
- Label check_prototype;
- cmpq(rcx, rax);
- j(equal, &check_prototype, Label::kNear);
- movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
- cmpq(rdx, empty_fixed_array_value);
+ EnumLength(rdx, rbx);
+ Cmp(rdx, Smi::FromInt(0));
j(not_equal, call_runtime);
- // Load the prototype from the map and loop if non-null.
- bind(&check_prototype);
- movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
- cmpq(rcx, null_value);
+ bind(&start);
+
+ // Check that there are no elements. Register rcx contains the current JS
+ // object we've reached through the prototype chain.
+ Label no_elements;
+ cmpp(empty_fixed_array_value,
+ FieldOperand(rcx, JSObject::kElementsOffset));
+ j(equal, &no_elements);
+
+ // Second chance: the object may be using the empty slow element dictionary.
+ LoadRoot(kScratchRegister, Heap::kEmptySlowElementDictionaryRootIndex);
+ cmpp(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
+ j(not_equal, call_runtime);
+
+ bind(&no_elements);
+ movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
+ cmpp(rcx, null_value);
j(not_equal, &next);
}
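+// Control-flow recap (nothing new): the jmp(&start) entry lets the receiver
+// skip the EnumLength == 0 test that every object further up the chain must
+// pass; the loop then follows Map::kPrototypeOffset until null_value is
+// reached, bailing out to call_runtime on an invalid enum cache, a non-empty
+// cache on a prototype, or an unexpected elements backing store.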
+void MacroAssembler::TestJSArrayForAllocationMemento(
+ Register receiver_reg,
+ Register scratch_reg,
+ Label* no_memento_found) {
+ ExternalReference new_space_start =
+ ExternalReference::new_space_start(isolate());
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+
+ leap(scratch_reg, Operand(receiver_reg,
+ JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
+ Move(kScratchRegister, new_space_start);
+ cmpp(scratch_reg, kScratchRegister);
+ j(less, no_memento_found);
+ cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
+ j(greater, no_memento_found);
+ CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
+ Heap::kAllocationMementoMapRootIndex);
+}
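+// Range-check sketch (recap): scratch_reg holds the address just past a
+// would-be AllocationMemento behind the JSArray. Only if new_space_start <=
+// scratch_reg <= new_space_allocation_top can a memento exist; otherwise the
+// code jumps to no_memento_found. The caller reads the flags left by the
+// final CompareRoot, e.g. j(equal, &memento_found) (hypothetical label).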
+
+
+void MacroAssembler::JumpIfDictionaryInPrototypeChain(
+ Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* found) {
+ DCHECK(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
+ DCHECK(!scratch1.is(scratch0));
+ Register current = scratch0;
+ Label loop_again;
+
+ movp(current, object);
+
+ // Loop based on the map going up the prototype chain.
+ bind(&loop_again);
+ movp(current, FieldOperand(current, HeapObject::kMapOffset));
+ movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
+ DecodeField<Map::ElementsKindBits>(scratch1);
+ cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
+ j(equal, found);
+ movp(current, FieldOperand(current, Map::kPrototypeOffset));
+ CompareRoot(current, Heap::kNullValueRootIndex);
+ j(not_equal, &loop_again);
+}
+
+
+void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
+ DCHECK(!dividend.is(rax));
+ DCHECK(!dividend.is(rdx));
+ base::MagicNumbersForDivision<uint32_t> mag =
+ base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
+ movl(rax, Immediate(mag.multiplier));
+ imull(dividend);
+ bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
+ if (divisor > 0 && neg) addl(rdx, dividend);
+ if (divisor < 0 && !neg && mag.multiplier > 0) subl(rdx, dividend);
+ if (mag.shift > 0) sarl(rdx, Immediate(mag.shift));
+ movl(rax, dividend);
+ shrl(rax, Immediate(31));
+ addl(rdx, rax);
+}
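+// Scalar equivalent of the sequence above (a minimal sketch assuming the
+// semantics of base::SignedDivisionByConstant; not part of this patch):
+//
+//   int32_t TruncatingDivRef(int32_t dividend, int32_t divisor) {
+//     base::MagicNumbersForDivision<uint32_t> mag =
+//         base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
+//     // imull leaves the high 32 bits of the signed product in rdx.
+//     int64_t product =
+//         static_cast<int64_t>(static_cast<int32_t>(mag.multiplier)) *
+//         dividend;
+//     int32_t high = static_cast<int32_t>(product >> 32);
+//     bool neg = (mag.multiplier & (1u << 31)) != 0;
+//     if (divisor > 0 && neg) high += dividend;
+//     if (divisor < 0 && !neg && mag.multiplier > 0) high -= dividend;
+//     if (mag.shift > 0) high >>= mag.shift;
+//     // Adding the dividend's sign bit turns floor division into truncation.
+//     return high + (static_cast<uint32_t>(dividend) >> 31);
+//   }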
+
} } // namespace v8::internal