Update V8 to r7427: Initial merge by git
As required by WebKit r82507
Change-Id: I7ae83ef3f689356043b4929255b7c1dd31d8c5df
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index d431f6a..3a1a8b6 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -43,7 +43,7 @@
: Assembler(buffer, size),
generating_stub_(false),
allow_stub_calls_(true),
- code_object_(Heap::undefined_value()) {
+ code_object_(HEAP->undefined_value()) {
}
@@ -103,7 +103,22 @@
}
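+// CallSize() must return exactly the number of bytes that the matching
+// Call() emits; the debug-mode CHECK_EQs below verify the two stay in sync.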
+int MacroAssembler::CallSize(Register target, Condition cond) {
+#if USE_BLX
+ return kInstrSize;
+#else
+ return 2 * kInstrSize;
+#endif
+}
+
+
void MacroAssembler::Call(Register target, Condition cond) {
+ // Block constant pool for the call instruction sequence.
+ BlockConstPoolScope block_const_pool(this);
+#ifdef DEBUG
+ int pre_position = pc_offset();
+#endif
+
#if USE_BLX
blx(target, cond);
#else
@@ -111,29 +126,46 @@
mov(lr, Operand(pc), LeaveCC, cond);
mov(pc, Operand(target), LeaveCC, cond);
#endif
+
+#ifdef DEBUG
+ int post_position = pc_offset();
+ CHECK_EQ(pre_position + CallSize(target, cond), post_position);
+#endif
}
-void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
- Condition cond) {
+int MacroAssembler::CallSize(
+ intptr_t target, RelocInfo::Mode rmode, Condition cond) {
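+ // Two instructions for the basic call sequence, plus one more when the
+ // target operand cannot be encoded in a single mov.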
+ int size = 2 * kInstrSize;
+ Instr mov_instr = cond | MOV | LeaveCC;
+ if (!Operand(target, rmode).is_single_instruction(mov_instr)) {
+ size += kInstrSize;
+ }
+ return size;
+}
+
+
+void MacroAssembler::Call(
+ intptr_t target, RelocInfo::Mode rmode, Condition cond) {
+ // Block constant pool for the call instruction sequence.
+ BlockConstPoolScope block_const_pool(this);
+#ifdef DEBUG
+ int pre_position = pc_offset();
+#endif
+
#if USE_BLX
// On ARMv5 and after the recommended call sequence is:
// ldr ip, [pc, #...]
// blx ip
- // The two instructions (ldr and blx) could be separated by a constant
- // pool and the code would still work. The issue comes from the
- // patching code which expect the ldr to be just above the blx.
- { BlockConstPoolScope block_const_pool(this);
- // Statement positions are expected to be recorded when the target
- // address is loaded. The mov method will automatically record
- // positions when pc is the target, since this is not the case here
- // we have to do it explicitly.
- positions_recorder()->WriteRecordedPositions();
+ // Statement positions are expected to be recorded when the target
+ // address is loaded. The mov method will automatically record
+ // positions when pc is the target, since this is not the case here
+ // we have to do it explicitly.
+ positions_recorder()->WriteRecordedPositions();
- mov(ip, Operand(target, rmode), LeaveCC, cond);
- blx(ip, cond);
- }
+ mov(ip, Operand(target, rmode), LeaveCC, cond);
+ blx(ip, cond);
ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
#else
@@ -141,24 +173,58 @@
mov(lr, Operand(pc), LeaveCC, cond);
// Emit a ldr<cond> pc, [pc + offset of target in constant pool].
mov(pc, Operand(target, rmode), LeaveCC, cond);
-
ASSERT(kCallTargetAddressOffset == kInstrSize);
#endif
+
+#ifdef DEBUG
+ int post_position = pc_offset();
+ CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
+#endif
}
-void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode,
- Condition cond) {
- ASSERT(!RelocInfo::IsCodeTarget(rmode));
- Call(reinterpret_cast<intptr_t>(target), rmode, cond);
+int MacroAssembler::CallSize(
+ byte* target, RelocInfo::Mode rmode, Condition cond) {
+ return CallSize(reinterpret_cast<intptr_t>(target), rmode, cond);
}
-void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond) {
+void MacroAssembler::Call(
+ byte* target, RelocInfo::Mode rmode, Condition cond) {
+#ifdef DEBUG
+ int pre_position = pc_offset();
+#endif
+
+ ASSERT(!RelocInfo::IsCodeTarget(rmode));
+ Call(reinterpret_cast<intptr_t>(target), rmode, cond);
+
+#ifdef DEBUG
+ int post_position = pc_offset();
+ CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
+#endif
+}
+
+
+int MacroAssembler::CallSize(
+ Handle<Code> code, RelocInfo::Mode rmode, Condition cond) {
+ return CallSize(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
+}
+
+
+void MacroAssembler::Call(
+ Handle<Code> code, RelocInfo::Mode rmode, Condition cond) {
+#ifdef DEBUG
+ int pre_position = pc_offset();
+#endif
+
ASSERT(RelocInfo::IsCodeTarget(rmode));
// 'code' is always generated ARM code, never THUMB code
Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
+
+#ifdef DEBUG
+ int post_position = pc_offset();
+ CHECK_EQ(pre_position + CallSize(code, rmode, cond), post_position);
+#endif
}
@@ -226,7 +292,7 @@
} else if (!src2.is_single_instruction() &&
!src2.must_use_constant_pool() &&
- CpuFeatures::IsSupported(ARMv7) &&
+ Isolate::Current()->cpu_features()->IsSupported(ARMv7) &&
IsPowerOf2(src2.immediate() + 1)) {
ubfx(dst, src1, 0, WhichPowerOf2(src2.immediate() + 1), cond);
@@ -239,7 +305,7 @@
void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
and_(dst, src1, Operand(mask), LeaveCC, cond);
if (lsb != 0) {
@@ -254,7 +320,7 @@
void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
and_(dst, src1, Operand(mask), LeaveCC, cond);
int shift_up = 32 - lsb - width;
@@ -282,7 +348,7 @@
ASSERT(lsb + width < 32);
ASSERT(!scratch.is(dst));
if (width == 0) return;
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
bic(dst, dst, Operand(mask));
and_(scratch, src, Operand((1 << width) - 1));
@@ -296,7 +362,7 @@
void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
bic(dst, dst, Operand(mask));
} else {
@@ -307,7 +373,7 @@
void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
Condition cond) {
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
ASSERT(!dst.is(pc) && !src.rm().is(pc));
ASSERT((satpos >= 0) && (satpos <= 31));
@@ -367,7 +433,7 @@
void MacroAssembler::RecordWriteHelper(Register object,
Register address,
Register scratch) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Check that the object is not in new space.
Label not_in_new_space;
InNewSpace(object, scratch, ne, &not_in_new_space);
@@ -395,8 +461,8 @@
Condition cond,
Label* branch) {
ASSERT(cond == eq || cond == ne);
- and_(scratch, object, Operand(ExternalReference::new_space_mask()));
- cmp(scratch, Operand(ExternalReference::new_space_start()));
+ and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
+ cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
b(cond, branch);
}
@@ -429,7 +495,7 @@
// Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
mov(object, Operand(BitCast<int32_t>(kZapValue)));
mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
@@ -461,7 +527,7 @@
// Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
mov(object, Operand(BitCast<int32_t>(kZapValue)));
mov(address, Operand(BitCast<int32_t>(kZapValue)));
mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
@@ -553,7 +619,7 @@
ASSERT_EQ(dst1.code() + 1, dst2.code());
// Generate two ldr instructions if ldrd is not available.
- if (CpuFeatures::IsSupported(ARMv7)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
ldrd(dst1, dst2, src, cond);
} else {
@@ -578,7 +644,7 @@
ASSERT_EQ(src1.code() + 1, src2.code());
// Generate two str instructions if strd is not available.
- if (CpuFeatures::IsSupported(ARMv7)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
strd(src1, src2, dst, cond);
} else {
@@ -665,7 +731,7 @@
mov(fp, Operand(sp)); // Setup new frame pointer.
// Reserve room for saved entry sp and code object.
sub(sp, sp, Operand(2 * kPointerSize));
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
mov(ip, Operand(0));
str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
@@ -673,9 +739,9 @@
str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
// Save the frame pointer and the context in top.
- mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
+ mov(ip, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
str(fp, MemOperand(ip));
- mov(ip, Operand(ExternalReference(Top::k_context_address)));
+ mov(ip, Operand(ExternalReference(Isolate::k_context_address, isolate())));
str(cp, MemOperand(ip));
// Optionally save all double registers.
@@ -751,11 +817,11 @@
// Clear top frame.
mov(r3, Operand(0, RelocInfo::NONE));
- mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
+ mov(ip, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
str(r3, MemOperand(ip));
// Restore current context from top and clear it in debug mode.
- mov(ip, Operand(ExternalReference(Top::k_context_address)));
+ mov(ip, Operand(ExternalReference(Isolate::k_context_address, isolate())));
ldr(cp, MemOperand(ip));
#ifdef DEBUG
str(r3, MemOperand(ip));
@@ -784,7 +850,7 @@
Register code_reg,
Label* done,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
+ CallWrapper* call_wrapper) {
bool definitely_matches = false;
Label regular_invoke;
@@ -837,10 +903,13 @@
}
Handle<Code> adaptor =
- Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
+ if (call_wrapper != NULL) {
+ call_wrapper->BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
+ }
Call(adaptor, RelocInfo::CODE_TARGET);
- if (post_call_generator != NULL) post_call_generator->Generate();
+ if (call_wrapper != NULL) call_wrapper->AfterCall();
b(done);
} else {
Jump(adaptor, RelocInfo::CODE_TARGET);
@@ -854,14 +923,15 @@
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
+ CallWrapper* call_wrapper) {
Label done;
InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
- post_call_generator);
+ call_wrapper);
if (flag == CALL_FUNCTION) {
+ if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(code));
Call(code);
- if (post_call_generator != NULL) post_call_generator->Generate();
+ if (call_wrapper != NULL) call_wrapper->AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
Jump(code);
@@ -896,7 +966,7 @@
void MacroAssembler::InvokeFunction(Register fun,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
+ CallWrapper* call_wrapper) {
// Contract with called JS functions requires that function is passed in r1.
ASSERT(fun.is(r1));
@@ -913,7 +983,7 @@
FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, post_call_generator);
+ InvokeCode(code_reg, expected, actual, flag, call_wrapper);
}
@@ -977,7 +1047,7 @@
void MacroAssembler::DebugBreak() {
ASSERT(allow_stub_calls());
mov(r0, Operand(0, RelocInfo::NONE));
- mov(r1, Operand(ExternalReference(Runtime::kDebugBreak)));
+ mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
CEntryStub ces(1);
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
@@ -1000,7 +1070,7 @@
&& StackHandlerConstants::kPCOffset == 3 * kPointerSize);
stm(db_w, sp, r3.bit() | fp.bit() | lr.bit());
// Save the current handler as the next handler.
- mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+ mov(r3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
ldr(r1, MemOperand(r3));
ASSERT(StackHandlerConstants::kNextOffset == 0);
push(r1);
@@ -1019,7 +1089,7 @@
&& StackHandlerConstants::kPCOffset == 3 * kPointerSize);
stm(db_w, sp, r6.bit() | ip.bit() | lr.bit());
// Save the current handler as the next handler.
- mov(r7, Operand(ExternalReference(Top::k_handler_address)));
+ mov(r7, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
ldr(r6, MemOperand(r7));
ASSERT(StackHandlerConstants::kNextOffset == 0);
push(r6);
@@ -1032,7 +1102,7 @@
void MacroAssembler::PopTryHandler() {
ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
pop(r1);
- mov(ip, Operand(ExternalReference(Top::k_handler_address)));
+ mov(ip, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
str(r1, MemOperand(ip));
}
@@ -1048,7 +1118,7 @@
STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// Drop the sp to the top of the handler.
- mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+ mov(r3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
ldr(sp, MemOperand(r3));
// Restore the next handler and frame pointer, discard handler state.
@@ -1067,7 +1137,7 @@
// Restore cp otherwise.
ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
#ifdef DEBUG
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
mov(lr, Operand(pc));
}
#endif
@@ -1087,7 +1157,7 @@
}
// Drop sp to the top stack handler.
- mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+ mov(r3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
ldr(sp, MemOperand(r3));
// Unwind the handlers until the ENTRY handler is found.
@@ -1111,7 +1181,8 @@
if (type == OUT_OF_MEMORY) {
// Set external caught exception to false.
- ExternalReference external_caught(Top::k_external_caught_exception_address);
+ ExternalReference external_caught(
+ Isolate::k_external_caught_exception_address, isolate());
mov(r0, Operand(false, RelocInfo::NONE));
mov(r2, Operand(external_caught));
str(r0, MemOperand(r2));
@@ -1119,7 +1190,8 @@
// Set pending exception and r0 to out of memory exception.
Failure* out_of_memory = Failure::OutOfMemoryException();
mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
+ mov(r2, Operand(ExternalReference(Isolate::k_pending_exception_address,
+ isolate())));
str(r0, MemOperand(r2));
}
@@ -1140,7 +1212,7 @@
// Restore cp otherwise.
ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
#ifdef DEBUG
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
mov(lr, Operand(pc));
}
#endif
@@ -1172,7 +1244,7 @@
ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
// Check the context is a global context.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// TODO(119): avoid push(holder_reg)/pop(holder_reg)
// Cannot use ip as a temporary in this verification code. Due to the fact
// that ip is clobbered as part of cmp with an object Operand.
@@ -1191,7 +1263,7 @@
b(eq, &same_contexts);
// Check the context is a global context.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// TODO(119): avoid push(holder_reg)/pop(holder_reg)
// Cannot use ip as a temporary in this verification code. Due to the fact
// that ip is clobbered as part of cmp with an object Operand.
@@ -1233,7 +1305,7 @@
Label* gc_required,
AllocationFlags flags) {
if (!FLAG_inline_new) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
mov(result, Operand(0x7091));
mov(scratch1, Operand(0x7191));
@@ -1260,9 +1332,9 @@
// Also, assert that the registers are numbered such that the values
// are loaded in the correct order.
ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address();
+ ExternalReference::new_space_allocation_top_address(isolate());
ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
+ ExternalReference::new_space_allocation_limit_address(isolate());
intptr_t top =
reinterpret_cast<intptr_t>(new_space_allocation_top.address());
intptr_t limit =
@@ -1282,7 +1354,7 @@
// Load allocation top into result and allocation limit into ip.
ldm(ia, topaddr, result.bit() | ip.bit());
} else {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Assert that result actually contains top on entry. ip is used
// immediately below so this use of ip does not cause difference with
// respect to register content between debug and release mode.
@@ -1316,7 +1388,7 @@
Label* gc_required,
AllocationFlags flags) {
if (!FLAG_inline_new) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
mov(result, Operand(0x7091));
mov(scratch1, Operand(0x7191));
@@ -1340,9 +1412,9 @@
// Also, assert that the registers are numbered such that the values
// are loaded in the correct order.
ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address();
+ ExternalReference::new_space_allocation_top_address(isolate());
ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
+ ExternalReference::new_space_allocation_limit_address(isolate());
intptr_t top =
reinterpret_cast<intptr_t>(new_space_allocation_top.address());
intptr_t limit =
@@ -1360,7 +1432,7 @@
// Load allocation top into result and allocation limit into ip.
ldm(ia, topaddr, result.bit() | ip.bit());
} else {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Assert that result actually contains top on entry. ip is used
// immediately below so this use of ip does not cause difference with
// respect to register content between debug and release mode.
@@ -1385,7 +1457,7 @@
b(hi, gc_required);
// Update allocation top. result temporarily holds the new top.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
tst(scratch2, Operand(kObjectAlignmentMask));
Check(eq, "Unaligned allocation in new space");
}
@@ -1401,7 +1473,7 @@
void MacroAssembler::UndoAllocationInNewSpace(Register object,
Register scratch) {
ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address();
+ ExternalReference::new_space_allocation_top_address(isolate());
// Make sure the object has no tag before resetting top.
and_(object, object, Operand(~kHeapObjectTagMask));
@@ -1689,7 +1761,7 @@
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
str(r4, MemOperand(r7, kNextOffset));
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
ldr(r1, MemOperand(r7, kLevelOffset));
cmp(r1, r6);
Check(eq, "Unexpected level after return from api call");
@@ -1703,7 +1775,7 @@
// Check if the function scheduled an exception.
bind(&leave_exit_frame);
LoadRoot(r4, Heap::kTheHoleValueRootIndex);
- mov(ip, Operand(ExternalReference::scheduled_exception_address()));
+ mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate())));
ldr(r5, MemOperand(ip));
cmp(r4, r5);
b(ne, &promote_scheduled_exception);
@@ -1714,8 +1786,11 @@
mov(pc, lr);
bind(&promote_scheduled_exception);
- MaybeObject* result = TryTailCallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException), 0, 1);
+ MaybeObject* result = TryTailCallExternalReference(
+     ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+     0,
+     1);
if (result->IsFailure()) {
return result;
}
@@ -1725,7 +1800,8 @@
str(r5, MemOperand(r7, kLimitOffset));
mov(r4, r0);
PrepareCallCFunction(0, r5);
- CallCFunction(ExternalReference::delete_handle_scope_extensions(), 0);
+ CallCFunction(
+ ExternalReference::delete_handle_scope_extensions(isolate()), 0);
mov(r0, r4);
jmp(&leave_exit_frame);
@@ -1826,7 +1902,7 @@
Register scratch2,
DwVfpRegister double_scratch,
Label *not_int32) {
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
sub(scratch, source, Operand(kHeapObjectTag));
vldr(double_scratch, scratch, HeapNumber::kValueOffset);
@@ -1922,7 +1998,7 @@
Register scratch1,
Register scratch2,
CheckForInexactConversion check_inexact) {
- ASSERT(CpuFeatures::IsSupported(VFP3));
+ ASSERT(Isolate::Current()->cpu_features()->IsSupported(VFP3));
CpuFeatures::Scope scope(VFP3);
Register prev_fpscr = scratch1;
Register scratch = scratch2;
@@ -1962,10 +2038,125 @@
}
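+
+// Computes the ECMA-262 ToInt32 result (the value modulo 2^32,
+// reinterpreted as signed) for a double that lies outside the signed
+// 32-bit range, using only the raw high and low words of its IEEE-754
+// representation.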
+void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
+ Register input_high,
+ Register input_low,
+ Register scratch) {
+ Label done, normal_exponent, restore_sign;
+
+ // Extract the biased exponent in result.
+ Ubfx(result,
+ input_high,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+
+ // Check for Infinity and NaNs, which should return 0.
+ cmp(result, Operand(HeapNumber::kExponentMask));
+ mov(result, Operand(0), LeaveCC, eq);
+ b(eq, &done);
+
+ // Express exponent as delta to (number of mantissa bits + 31).
+ sub(result,
+ result,
+ Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31),
+ SetCC);
+
+ // If the delta is strictly positive, all bits would be shifted away,
+ // which means that we can return 0.
+ b(le, &normal_exponent);
+ mov(result, Operand(0));
+ b(&done);
+
+ bind(&normal_exponent);
+ const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
+ // Calculate shift.
+ add(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits), SetCC);
+
+ // Save the sign.
+ Register sign = result;
+ result = no_reg;
+ and_(sign, input_high, Operand(HeapNumber::kSignMask));
+
+ // Set the implicit 1 before the mantissa part in input_high.
+ orr(input_high,
+ input_high,
+ Operand(1 << HeapNumber::kMantissaBitsInTopWord));
+ // Shift the mantissa bits to the correct position.
+ // We don't need to clear non-mantissa bits as they will be shifted away.
+ // If they weren't, it would mean that the answer is in the 32-bit range.
+ mov(input_high, Operand(input_high, LSL, scratch));
+
+ // Replace the shifted bits with bits from the lower mantissa word.
+ Label pos_shift, shift_done;
+ rsb(scratch, scratch, Operand(32), SetCC);
+ b(&pos_shift, ge);
+
+ // Negate scratch.
+ rsb(scratch, scratch, Operand(0));
+ mov(input_low, Operand(input_low, LSL, scratch));
+ b(&shift_done);
+
+ bind(&pos_shift);
+ mov(input_low, Operand(input_low, LSR, scratch));
+
+ bind(&shift_done);
+ orr(input_high, input_high, Operand(input_low));
+ // Restore sign if necessary.
+ cmp(sign, Operand(0));
+ result = sign;
+ sign = no_reg;
+ rsb(result, input_high, Operand(0), LeaveCC, ne);
+ mov(result, input_high, LeaveCC, eq);
+ bind(&done);
+}
+
+
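+// ECMA-262 ToInt32 truncation: try a direct VFP conversion first and fall
+// back to EmitOutOfInt32RangeTruncate when the conversion overflows or the
+// input is NaN.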
+void MacroAssembler::EmitECMATruncate(Register result,
+ DwVfpRegister double_input,
+ SwVfpRegister single_scratch,
+ Register scratch,
+ Register input_high,
+ Register input_low) {
+ CpuFeatures::Scope scope(VFP3);
+ ASSERT(!input_high.is(result));
+ ASSERT(!input_low.is(result));
+ ASSERT(!input_low.is(input_high));
+ ASSERT(!scratch.is(result) &&
+ !scratch.is(input_high) &&
+ !scratch.is(input_low));
+ ASSERT(!single_scratch.is(double_input.low()) &&
+ !single_scratch.is(double_input.high()));
+
+ Label done;
+
+ // Clear cumulative exception flags.
+ ClearFPSCRBits(kVFPExceptionMask, scratch);
+ // Try a conversion to a signed integer.
+ vcvt_s32_f64(single_scratch, double_input);
+ vmov(result, single_scratch);
+ // Retrieve the FPSCR.
+ vmrs(scratch);
+ // Check for overflow and NaNs.
+ tst(scratch, Operand(kVFPOverflowExceptionBit |
+ kVFPUnderflowExceptionBit |
+ kVFPInvalidOpExceptionBit));
+ // If we had no exceptions we are done.
+ b(eq, &done);
+
+ // Load the double value and perform a manual truncation.
+ vmov(input_low, input_high, double_input);
+ EmitOutOfInt32RangeTruncate(result,
+ input_high,
+ input_low,
+ scratch);
+ bind(&done);
+}
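+// (Illustrative register choice only: any call site must satisfy the
+// ASSERTs above, e.g. EmitECMATruncate(r0, d0, s2, r9, r3, r4) does.)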
+
+
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
Register src,
int num_least_bits) {
- if (CpuFeatures::IsSupported(ARMv7)) {
+ if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
ubfx(dst, src, kSmiTagSize, num_least_bits);
} else {
mov(dst, Operand(src, ASR, kSmiTagSize));
@@ -1981,7 +2172,8 @@
}
-void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments) {
// All parameters are on the stack. r0 has the return value after call.
// If the expected number of arguments of the runtime function is
@@ -1997,7 +2189,7 @@
// should remove this need and make the runtime routine entry code
// smarter.
mov(r0, Operand(num_arguments));
- mov(r1, Operand(ExternalReference(f)));
+ mov(r1, Operand(ExternalReference(f, isolate())));
CEntryStub stub(1);
CallStub(&stub);
}
@@ -2009,9 +2201,9 @@
void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- Runtime::Function* function = Runtime::FunctionForId(id);
+ const Runtime::Function* function = Runtime::FunctionForId(id);
mov(r0, Operand(function->nargs));
- mov(r1, Operand(ExternalReference(function)));
+ mov(r1, Operand(ExternalReference(function, isolate())));
CEntryStub stub(1);
stub.SaveDoubles();
CallStub(&stub);
@@ -2054,7 +2246,9 @@
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
- TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
+ TailCallExternalReference(ExternalReference(fid, isolate()),
+ num_arguments,
+ result_size);
}
@@ -2083,11 +2277,12 @@
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
- PostCallGenerator* post_call_generator) {
+ CallWrapper* call_wrapper) {
GetBuiltinEntry(r2, id);
if (flags == CALL_JS) {
+ if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(r2));
Call(r2);
- if (post_call_generator != NULL) post_call_generator->Generate();
+ if (call_wrapper != NULL) call_wrapper->AfterCall();
} else {
ASSERT(flags == JUMP_JS);
Jump(r2);
@@ -2149,14 +2344,14 @@
void MacroAssembler::Assert(Condition cond, const char* msg) {
- if (FLAG_debug_code)
+ if (emit_debug_code())
Check(cond, msg);
}
void MacroAssembler::AssertRegisterIsRoot(Register reg,
Heap::RootListIndex index) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
LoadRoot(ip, index);
cmp(reg, ip);
Check(eq, "Register did not match expected root");
@@ -2165,7 +2360,7 @@
void MacroAssembler::AssertFastElements(Register elements) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
ASSERT(!elements.is(ip));
Label ok;
push(elements);
@@ -2253,7 +2448,7 @@
// (i.e., the static scope chain and runtime context chain do not agree).
// A variable occurring in such a scope should have slot type LOOKUP and
// not CONTEXT.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
ldr(ip, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
cmp(dst, ip);
Check(eq, "Yo dawg, I heard you liked function contexts "
@@ -2278,7 +2473,7 @@
Register scratch) {
// Load the initial map. The global functions all have initial maps.
ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Label ok, fail;
CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, false);
b(&ok);
@@ -2300,6 +2495,18 @@
}
+void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
+ Register reg,
+ Register scratch,
+ Label* zero_and_neg,
+ Label* not_power_of_two) {
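+ // A power of two has exactly one bit set, so reg & (reg - 1) is 0; the
+ // sub with SetCC makes the mi branch catch zero and negative inputs.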
+ sub(scratch, reg, Operand(1), SetCC);
+ b(mi, zero_and_neg);
+ tst(scratch, reg);
+ b(ne, not_power_of_two);
+}
+
+
void MacroAssembler::JumpIfNotBothSmi(Register reg1,
Register reg2,
Label* on_not_both_smi) {
@@ -2486,7 +2693,7 @@
// Copy bytes in word size chunks.
bind(&word_loop);
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
tst(src, Operand(kPointerSize - 1));
Assert(eq, "Expecting alignment for CopyBytes");
}
@@ -2585,11 +2792,17 @@
b(ne, failure);
}
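+
+// AAPCS: the first four integer or pointer arguments are passed in
+// registers r0-r3; any further arguments go on the stack.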
+static const int kRegisterPassedArguments = 4;
+
+
void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
int frame_alignment = ActivationFrameAlignment();
+
+ // Reserve space for the Isolate address, which is always passed as the
+ // last parameter.
+ num_arguments += 1;
+
// Up to four simple arguments are passed in registers r0..r3.
- int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4;
+ int stack_passed_arguments = (num_arguments <= kRegisterPassedArguments) ?
+ 0 : num_arguments - kRegisterPassedArguments;
if (frame_alignment > kPointerSize) {
// Make stack end at alignment and make room for num_arguments - 4 words
// and the original value of sp.
@@ -2606,17 +2819,41 @@
void MacroAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
- mov(ip, Operand(function));
- CallCFunction(ip, num_arguments);
+ CallCFunctionHelper(no_reg, function, ip, num_arguments);
+}
+
+void MacroAssembler::CallCFunction(Register function,
+ Register scratch,
+ int num_arguments) {
+ CallCFunctionHelper(function,
+ ExternalReference::the_hole_value_location(isolate()),
+ scratch,
+ num_arguments);
}
-void MacroAssembler::CallCFunction(Register function, int num_arguments) {
+void MacroAssembler::CallCFunctionHelper(Register function,
+ ExternalReference function_reference,
+ Register scratch,
+ int num_arguments) {
+ // Pass the Isolate address as an extra, final argument: in a register
+ // when one is free, otherwise on the stack after the other arguments.
+ if (num_arguments < kRegisterPassedArguments) {
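+ // r0-r3 already hold the first num_arguments arguments, so the next
+ // free argument register receives the Isolate pointer.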
+ Register arg_to_reg[] = {r0, r1, r2, r3};
+ Register r = arg_to_reg[num_arguments];
+ mov(r, Operand(ExternalReference::isolate_address()));
+ } else {
+ int stack_passed_arguments = num_arguments - kRegisterPassedArguments;
+ // Push Isolate address on the stack after the arguments.
+ mov(scratch, Operand(ExternalReference::isolate_address()));
+ str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ }
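+ // From here on, the Isolate pointer counts as an ordinary argument for
+ // stack alignment and cleanup.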
+ num_arguments += 1;
+
// Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which
// provides more information.
#if defined(V8_HOST_ARCH_ARM)
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
int frame_alignment = OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
@@ -2635,8 +2872,13 @@
// Just call directly. The function called cannot cause a GC, or
// allow preemption, so the return address in the link register
// stays correct.
+ if (function.is(no_reg)) {
+ mov(scratch, Operand(function_reference));
+ function = scratch;
+ }
Call(function);
- int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4;
+ int stack_passed_arguments = (num_arguments <= kRegisterPassedArguments) ?
+ 0 : num_arguments - kRegisterPassedArguments;
if (OS::ActivationFrameAlignment() > kPointerSize) {
ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
@@ -2650,7 +2892,7 @@
const uint32_t kLdrOffsetMask = (1 << 12) - 1;
const int32_t kPCRegOffset = 2 * kPointerSize;
ldr(result, MemOperand(ldr_location));
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Check that the instruction is a ldr reg, [pc + offset] .
and_(result, result, Operand(kLdrPCPattern));
cmp(result, Operand(kLdrPCPattern));