Version 3.22.8
Performance and stability improvements on all platforms.
git-svn-id: http://v8.googlecode.com/svn/trunk@17111 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 428e33b..db3f14e 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,8 @@
+2013-10-04: Version 3.22.8
+
+ Performance and stability improvements on all platforms.
+
+
2013-10-03: Version 3.22.7
Debug: Allow stepping into on a given call frame
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 2c7fb78..5c5231b 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -168,6 +168,18 @@
}
+void BinaryOpStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r1, r0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+}
+
+
static void InitializeArrayConstructorDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor,
@@ -1185,993 +1197,6 @@
}
-// Generates code to call a C function to do a double operation.
-// This code never falls through, but returns with a heap number containing
-// the result in r0.
-// Register heap_number_result must hold a heap number in which the
-// result of the operation will be stored.
-// Requires the following layout on entry:
-// d0: Left value.
-// d1: Right value.
-// If soft float ABI, use also r0, r1, r2, r3.
-static void CallCCodeForDoubleOperation(MacroAssembler* masm,
- Token::Value op,
- Register heap_number_result,
- Register scratch) {
- // Assert that heap_number_result is callee-saved.
- // We currently always use r5 to pass it.
- ASSERT(heap_number_result.is(r5));
-
- // Push the current return address before the C call. Return will be
- // through pop(pc) below.
- __ push(lr);
- __ PrepareCallCFunction(0, 2, scratch);
- if (!masm->use_eabi_hardfloat()) {
- __ vmov(r0, r1, d0);
- __ vmov(r2, r3, d1);
- }
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
- }
- // Store answer in the overwritable heap number. Double returned in
- // registers r0 and r1 or in d0.
- if (masm->use_eabi_hardfloat()) {
- __ vstr(d0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
- } else {
- __ Strd(r0, r1,
- FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
- }
- // Place heap_number_result in r0 and return to the pushed return address.
- __ mov(r0, Operand(heap_number_result));
- __ pop(pc);
-}
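A minimal sketch of the kind of C helper that double_fp_operation resolves to, shown here for Token::MOD (an illustration under that assumption, not the actual runtime entry point):

    #include <cmath>
    // Receives its operands in d0/d1 under the hard-float ABI, or in r0-r3
    // under the soft-float ABI, and returns the result in d0 or r0/r1,
    // matching the register shuffling done by the stub above.
    extern "C" double mod_two_doubles(double left, double right) {
      return std::fmod(left, right);
    }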
-
-
-void BinaryOpStub::Initialize() {
- platform_specific_bit_ = true; // VFP2 is a base requirement for V8
-}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- Label get_result;
-
- __ Push(r1, r0);
-
- __ mov(r2, Operand(Smi::FromInt(MinorKey())));
- __ push(r2);
-
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
- MacroAssembler* masm) {
- UNIMPLEMENTED();
-}
-
-
-void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
- Token::Value op,
- Register scratch1,
- Register scratch2) {
- Register left = r1;
- Register right = r0;
-
- ASSERT(right.is(r0));
- ASSERT(!AreAliased(left, right, scratch1, scratch2, ip));
- STATIC_ASSERT(kSmiTag == 0);
-
- Label not_smi_result;
- switch (op) {
- case Token::ADD:
- __ add(right, left, Operand(right), SetCC); // Add optimistically.
- __ Ret(vc);
- __ sub(right, right, Operand(left)); // Revert optimistic add.
- break;
- case Token::SUB:
- __ sub(right, left, Operand(right), SetCC); // Subtract optimistically.
- __ Ret(vc);
- __ sub(right, left, Operand(right)); // Revert optimistic subtract.
- break;
- case Token::MUL:
- // Remove tag from one of the operands. This way the multiplication result
- // will be a smi if it fits the smi range.
- __ SmiUntag(ip, right);
- // Do multiplication
- // scratch1 = lower 32 bits of ip * left.
- // scratch2 = higher 32 bits of ip * left.
- __ smull(scratch1, scratch2, left, ip);
- // Check for overflowing the smi range - no overflow if higher 33 bits of
- // the result are identical.
- __ mov(ip, Operand(scratch1, ASR, 31));
- __ cmp(ip, Operand(scratch2));
- __ b(ne, &not_smi_result);
- // Go slow on zero result to handle -0.
- __ cmp(scratch1, Operand::Zero());
- __ mov(right, Operand(scratch1), LeaveCC, ne);
- __ Ret(ne);
- // We need -0 if we were multiplying a negative number by 0 to get 0.
- // We know one of them was zero.
- __ add(scratch2, right, Operand(left), SetCC);
- __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
- __ Ret(pl); // Return smi 0 if the non-zero one was positive.
- // We fall through here if we multiplied a negative number with 0, because
- // that would mean we should produce -0.
- break;
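The smull overflow check above exploits that a 64-bit product fits in 32 bits exactly when its high word equals the sign extension of its low word (the "higher 33 bits identical" test). A hedged C++ equivalent, assuming arithmetic right shift on signed values as on ARM:

    #include <cstdint>
    // Returns true on overflow; mirrors smull followed by (lo ASR 31) == hi.
    bool MulOverflows32(int32_t a, int32_t b, int32_t* out) {
      int64_t product = static_cast<int64_t>(a) * b;
      int32_t lo = static_cast<int32_t>(product);
      int32_t hi = static_cast<int32_t>(product >> 32);
      if (hi != (lo >> 31)) return true;  // high 33 bits not identical
      *out = lo;
      return false;
    }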
- case Token::DIV: {
- Label div_with_sdiv;
-
- // Check for 0 divisor.
- __ cmp(right, Operand::Zero());
- __ b(eq, &not_smi_result);
-
- // Check for power of two on the right hand side.
- __ sub(scratch1, right, Operand(1));
- __ tst(scratch1, right);
- if (CpuFeatures::IsSupported(SUDIV)) {
- __ b(ne, &div_with_sdiv);
- // Check for no remainder.
- __ tst(left, scratch1);
- __ b(ne, &not_smi_result);
- // Check for positive left hand side.
- __ cmp(left, Operand::Zero());
- __ b(mi, &div_with_sdiv);
- } else {
- __ b(ne, &not_smi_result);
- // Check for positive and no remainder.
- __ orr(scratch2, scratch1, Operand(0x80000000u));
- __ tst(left, scratch2);
- __ b(ne, &not_smi_result);
- }
-
- // Perform division by shifting.
- __ clz(scratch1, scratch1);
- __ rsb(scratch1, scratch1, Operand(31));
- __ mov(right, Operand(left, LSR, scratch1));
- __ Ret();
-
- if (CpuFeatures::IsSupported(SUDIV)) {
- CpuFeatureScope scope(masm, SUDIV);
- Label result_not_zero;
-
- __ bind(&div_with_sdiv);
- // Do division.
- __ sdiv(scratch1, left, right);
- // Check that the remainder is zero.
- __ mls(scratch2, scratch1, right, left);
- __ cmp(scratch2, Operand::Zero());
- __ b(ne, &not_smi_result);
- // Check for negative zero result.
- __ cmp(scratch1, Operand::Zero());
- __ b(ne, &result_not_zero);
- __ cmp(right, Operand::Zero());
- __ b(lt, &not_smi_result);
- __ bind(&result_not_zero);
- // Check for the corner case of dividing the most negative smi by -1.
- __ cmp(scratch1, Operand(0x40000000));
- __ b(eq, &not_smi_result);
- // Tag and return the result.
- __ SmiTag(right, scratch1);
- __ Ret();
- }
- break;
- }
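The shift-based fast path above derives log2 of the (tagged) power-of-two divisor via clz. On untagged values the same idea looks like the sketch below, using the GCC/Clang __builtin_clz intrinsic; the assembly operates on tagged smis, so its shift count differs by the tag bit:

    #include <cstdint>
    bool DividePowerOfTwo(uint32_t left, uint32_t right, uint32_t* out) {
      if (right == 0 || (right & (right - 1)) != 0) return false;  // not 2^k
      if (left & (right - 1)) return false;      // non-zero remainder
      int shift = 31 - __builtin_clz(right);     // log2(right)
      *out = left >> shift;
      return true;
    }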
- case Token::MOD: {
- Label modulo_with_sdiv;
-
- if (CpuFeatures::IsSupported(SUDIV)) {
- // Check for x % 0.
- __ cmp(right, Operand::Zero());
- __ b(eq, &not_smi_result);
-
- // Check for two positive smis.
- __ orr(scratch1, left, Operand(right));
- __ tst(scratch1, Operand(0x80000000u));
- __ b(ne, &modulo_with_sdiv);
-
- // Check for power of two on the right hand side.
- __ sub(scratch1, right, Operand(1));
- __ tst(scratch1, right);
- __ b(ne, &modulo_with_sdiv);
- } else {
- // Check for two positive smis.
- __ orr(scratch1, left, Operand(right));
- __ tst(scratch1, Operand(0x80000000u));
- __ b(ne, &not_smi_result);
-
- // Check for power of two on the right hand side.
- __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
- }
-
- // Perform modulus by masking (scratch1 contains right - 1).
- __ and_(right, left, Operand(scratch1));
- __ Ret();
-
- if (CpuFeatures::IsSupported(SUDIV)) {
- CpuFeatureScope scope(masm, SUDIV);
- __ bind(&modulo_with_sdiv);
- __ mov(scratch2, right);
- // Perform modulus with sdiv and mls.
- __ sdiv(scratch1, left, right);
- __ mls(right, scratch1, right, left);
- // Return if the result is not 0.
- __ cmp(right, Operand::Zero());
- __ Ret(ne);
- // The result is 0, check for -0 case.
- __ cmp(left, Operand::Zero());
- __ Ret(pl);
- // This is a -0 case, restore the value of right.
- __ mov(right, scratch2);
- // We fall through here to not_smi_result to produce -0.
- }
- break;
- }
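An untagged sketch of the two modulus paths above (illustration only): the masking path uses x % 2^k == x & (2^k - 1) for non-negative x, and the sdiv/mls pair computes the remainder as left - quotient * right:

    int32_t SmiMod(int32_t left, int32_t right) {
      // Mask path: the assembly only takes it after checking that left is
      // non-negative and right is a positive power of two.
      if (left >= 0 && right > 0 && (right & (right - 1)) == 0) {
        return left & (right - 1);
      }
      int32_t q = left / right;   // sdiv
      return left - q * right;    // mls
    }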
- case Token::BIT_OR:
- __ orr(right, left, Operand(right));
- __ Ret();
- break;
- case Token::BIT_AND:
- __ and_(right, left, Operand(right));
- __ Ret();
- break;
- case Token::BIT_XOR:
- __ eor(right, left, Operand(right));
- __ Ret();
- break;
- case Token::SAR:
- // Remove tags from right operand.
- __ GetLeastBitsFromSmi(scratch1, right, 5);
- __ mov(right, Operand(left, ASR, scratch1));
- // Smi tag result.
- __ bic(right, right, Operand(kSmiTagMask));
- __ Ret();
- break;
- case Token::SHR:
- // Remove tags from operands. We can't do this on a 31 bit number
- // because then the 0s get shifted into bit 30 instead of bit 31.
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ mov(scratch1, Operand(scratch1, LSR, scratch2));
- // Unsigned shift is not allowed to produce a negative number, so
- // check the sign bit and the sign bit after Smi tagging.
- __ tst(scratch1, Operand(0xc0000000));
- __ b(ne, &not_smi_result);
- // Smi tag result.
- __ SmiTag(right, scratch1);
- __ Ret();
- break;
- case Token::SHL:
- // Remove tags from operands.
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ mov(scratch1, Operand(scratch1, LSL, scratch2));
- // Check that the signed result fits in a Smi.
- __ TrySmiTag(right, scratch1, &not_smi_result);
- __ Ret();
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&not_smi_result);
-}
-
-
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- OverwriteMode mode);
-
-
-void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required,
- Label* miss,
- Token::Value op,
- OverwriteMode mode,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
- Register left = r1;
- Register right = r0;
- Register result = scratch3;
- ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
-
- ASSERT(smi_operands || (not_numbers != NULL));
- if (smi_operands) {
- __ AssertSmi(left);
- __ AssertSmi(right);
- }
- if (left_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(left, miss);
- }
- if (right_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(right, miss);
- }
-
- Register heap_number_map = scratch4;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- switch (op) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- // Allocate new heap number for result.
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
-
- // Load left and right operands into d0 and d1.
- if (smi_operands) {
- __ SmiToDouble(d1, right);
- __ SmiToDouble(d0, left);
- } else {
- // Load right operand into d1.
- if (right_type == BinaryOpIC::INT32) {
- __ LoadNumberAsInt32Double(
- right, d1, heap_number_map, scratch1, d8, miss);
- } else {
- Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
- __ LoadNumber(right, d1, heap_number_map, scratch1, fail);
- }
- // Load left operand into d0.
- if (left_type == BinaryOpIC::INT32) {
- __ LoadNumberAsInt32Double(
- left, d0, heap_number_map, scratch1, d8, miss);
- } else {
- Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
- __ LoadNumber(
- left, d0, heap_number_map, scratch1, fail);
- }
- }
-
- // Calculate the result.
- if (op != Token::MOD) {
- // Using VFP registers:
- // d0: Left value
- // d1: Right value
- switch (op) {
- case Token::ADD:
- __ vadd(d5, d0, d1);
- break;
- case Token::SUB:
- __ vsub(d5, d0, d1);
- break;
- case Token::MUL:
- __ vmul(d5, d0, d1);
- break;
- case Token::DIV:
- __ vdiv(d5, d0, d1);
- break;
- default:
- UNREACHABLE();
- }
-
- __ sub(r0, result, Operand(kHeapObjectTag));
- __ vstr(d5, r0, HeapNumber::kValueOffset);
- __ add(r0, r0, Operand(kHeapObjectTag));
- __ Ret();
- } else {
- // Call the C function to handle the double operation.
- CallCCodeForDoubleOperation(masm, op, result, scratch1);
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
- }
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- if (smi_operands) {
- __ SmiUntag(r3, left);
- __ SmiUntag(r2, right);
- } else {
- // Convert operands to 32-bit integers. Right in r2 and left in r3.
- __ TruncateNumberToI(left, r3, heap_number_map, scratch1, not_numbers);
- __ TruncateNumberToI(right, r2, heap_number_map, scratch1, not_numbers);
- }
-
- Label result_not_a_smi;
- switch (op) {
- case Token::BIT_OR:
- __ orr(r2, r3, Operand(r2));
- break;
- case Token::BIT_XOR:
- __ eor(r2, r3, Operand(r2));
- break;
- case Token::BIT_AND:
- __ and_(r2, r3, Operand(r2));
- break;
- case Token::SAR:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(r2, r2, 5);
- __ mov(r2, Operand(r3, ASR, r2));
- break;
- case Token::SHR:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(r2, r2, 5);
- __ mov(r2, Operand(r3, LSR, r2), SetCC);
- // SHR is special because it is required to produce a positive answer.
- // The code below for writing into heap numbers isn't capable of
- // writing the register as an unsigned int, so we go to the slow case
- // if we hit this case.
- __ b(mi, &result_not_a_smi);
- break;
- case Token::SHL:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(r2, r2, 5);
- __ mov(r2, Operand(r3, LSL, r2));
- break;
- default:
- UNREACHABLE();
- }
-
- // Check that the *signed* result fits in a smi.
- __ TrySmiTag(r0, r2, &result_not_a_smi);
- __ Ret();
-
- // Allocate new heap number for result.
- __ bind(&result_not_a_smi);
- if (smi_operands) {
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- } else {
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required,
- mode);
- }
-
- // r2: Answer as signed int32.
- // result: Heap number to write answer into.
-
- // Nothing can go wrong now, so move the heap number to r0, which is the
- // result.
- __ mov(r0, Operand(result));
-
- // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
- // mentioned above SHR needs to always produce a positive result.
- __ vmov(s0, r2);
- if (op == Token::SHR) {
- __ vcvt_f64_u32(d0, s0);
- } else {
- __ vcvt_f64_s32(d0, s0);
- }
- __ sub(r3, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r3, HeapNumber::kValueOffset);
- __ Ret();
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-// Generate the smi code. If the operation on smis is successful, this return
-// is generated. If the result is not a smi and heap number allocation is not
-// requested, the code falls through. If number allocation is requested but a
-// heap number cannot be allocated, the code jumps to the label gc_required.
-void BinaryOpStub_GenerateSmiCode(
- MacroAssembler* masm,
- Label* use_runtime,
- Label* gc_required,
- Token::Value op,
- BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
- OverwriteMode mode,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
- Label not_smis;
-
- Register left = r1;
- Register right = r0;
- ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
-
- // Perform combined smi check on both operands.
- __ orr(scratch1, left, Operand(right));
- __ JumpIfNotSmi(scratch1, &not_smis);
-
- // If the smi-smi operation results in a smi, the return is generated.
- BinaryOpStub_GenerateSmiSmiOperation(masm, op, scratch1, scratch2);
-
- // If heap number results are possible, generate the result in an allocated
- // heap number.
- if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
- BinaryOpStub_GenerateFPOperation(
- masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
- use_runtime, gc_required, &not_smis, op, mode, scratch2, scratch3,
- scratch1, scratch4);
- }
- __ bind(&not_smis);
-}
-
-
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label right_arg_changed, call_runtime;
-
- if (op_ == Token::MOD && encoded_right_arg_.has_value) {
- // It is guaranteed that the value will fit into a Smi, because if it
- // didn't, we wouldn't be here, see BinaryOp_Patch.
- __ cmp(r0, Operand(Smi::FromInt(fixed_right_arg_value())));
- __ b(ne, &right_arg_changed);
- }
-
- if (result_type_ == BinaryOpIC::UNINITIALIZED ||
- result_type_ == BinaryOpIC::SMI) {
- // Only allow smi results.
- BinaryOpStub_GenerateSmiCode(masm, &call_runtime, NULL, op_,
- NO_HEAPNUMBER_RESULTS, mode_, r5, r6, r4, r9);
- } else {
- // Allow heap number result and don't make a transition if a heap number
- // cannot be allocated.
- BinaryOpStub_GenerateSmiCode(masm, &call_runtime, &call_runtime, op_,
- ALLOW_HEAPNUMBER_RESULTS, mode_, r5, r6, r4, r9);
- }
-
- // Code falls through if the result is not returned as either a smi or heap
- // number.
- __ bind(&right_arg_changed);
- GenerateTypeTransition(masm);
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // If both arguments are strings, call the string add stub.
- // Otherwise, do a transition.
-
- // Registers containing left and right operands respectively.
- Register left = r1;
- Register right = r0;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &call_runtime);
- __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &call_runtime);
-
- // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime);
- __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &call_runtime);
-
- StringAddStub string_add_stub(
- (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&call_runtime);
- GenerateTypeTransition(masm);
-}
-
-
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
-
- Register left = r1;
- Register right = r0;
- Register scratch1 = r4;
- Register scratch2 = r9;
- Register scratch3 = r5;
- LowDwVfpRegister double_scratch = d0;
-
- Register heap_number_result = no_reg;
- Register heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- Label call_runtime;
- // Labels for type transition, used for wrong input or output types.
- // Both labels are currently bound to the same position. We use two
- // different labels to differentiate the causes leading to a type transition.
- Label transition;
-
- // Smi-smi fast case.
- Label skip;
- __ orr(scratch1, left, right);
- __ JumpIfNotSmi(scratch1, &skip);
- BinaryOpStub_GenerateSmiSmiOperation(masm, op_, scratch2, scratch3);
- // Fall through if the result is not a smi.
- __ bind(&skip);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(left, &transition);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(right, &transition);
- }
- // Load both operands and check that they are 32-bit integers.
- // Jump to type transition if they are not. The registers r0 and r1 (right
- // and left) are preserved for the runtime call.
- __ LoadNumberAsInt32Double(
- right, d1, heap_number_map, scratch1, d8, &transition);
- __ LoadNumberAsInt32Double(
- left, d0, heap_number_map, scratch1, d8, &transition);
-
- if (op_ != Token::MOD) {
- Label return_heap_number;
- switch (op_) {
- case Token::ADD:
- __ vadd(d5, d0, d1);
- break;
- case Token::SUB:
- __ vsub(d5, d0, d1);
- break;
- case Token::MUL:
- __ vmul(d5, d0, d1);
- break;
- case Token::DIV:
- __ vdiv(d5, d0, d1);
- break;
- default:
- UNREACHABLE();
- }
-
- if (result_type_ <= BinaryOpIC::INT32) {
- __ TryDoubleToInt32Exact(scratch1, d5, d8);
- // If the ne condition is set, the result does
- // not fit in a 32-bit integer.
- __ b(ne, &transition);
- // Try to tag the result as a Smi, return heap number on overflow.
- __ SmiTag(scratch1, SetCC);
- __ b(vs, &return_heap_number);
- // Check for minus zero, transition in that case (because we need
- // to return a heap number).
- Label not_zero;
- ASSERT(kSmiTag == 0);
- __ b(ne, &not_zero);
- __ VmovHigh(scratch2, d5);
- __ tst(scratch2, Operand(HeapNumber::kSignMask));
- __ b(ne, &transition);
- __ bind(&not_zero);
- __ mov(r0, scratch1);
- __ Ret();
- }
-
- __ bind(&return_heap_number);
- // Return a heap number, or fall through to type transition or runtime
- // call if we can't.
- // We are using vfp registers so r5 is available.
- heap_number_result = r5;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime,
- mode_);
- __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
- __ vstr(d5, r0, HeapNumber::kValueOffset);
- __ mov(r0, heap_number_result);
- __ Ret();
-
- // A DIV operation expecting an integer result falls through
- // to type transition.
-
- } else {
- if (encoded_right_arg_.has_value) {
- __ Vmov(d8, fixed_right_arg_value(), scratch1);
- __ VFPCompareAndSetFlags(d1, d8);
- __ b(ne, &transition);
- }
-
- // Allocate a heap number to store the result.
- heap_number_result = r5;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime,
- mode_);
-
- // Call the C function to handle the double operation.
- CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1);
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
-
- __ b(&call_runtime);
- }
-
- break;
- }
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- Label return_heap_number;
- // Convert operands to 32-bit integers. Right in r2 and left in r3. The
- // registers r0 and r1 (right and left) are preserved for the runtime
- // call.
- __ LoadNumberAsInt32(left, r3, heap_number_map,
- scratch1, d0, d1, &transition);
- __ LoadNumberAsInt32(right, r2, heap_number_map,
- scratch1, d0, d1, &transition);
-
- // The ECMA-262 standard specifies that, for shift operations, only the
- // 5 least significant bits of the shift value should be used.
- switch (op_) {
- case Token::BIT_OR:
- __ orr(r2, r3, Operand(r2));
- break;
- case Token::BIT_XOR:
- __ eor(r2, r3, Operand(r2));
- break;
- case Token::BIT_AND:
- __ and_(r2, r3, Operand(r2));
- break;
- case Token::SAR:
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, ASR, r2));
- break;
- case Token::SHR:
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, LSR, r2), SetCC);
- // SHR is special because it is required to produce a positive answer.
- // We only get a negative result if the shift value (r2) is 0.
- // This result cannot be represented as a signed 32-bit integer, try
- // to return a heap number if we can.
- __ b(mi, (result_type_ <= BinaryOpIC::INT32)
- ? &transition
- : &return_heap_number);
- break;
- case Token::SHL:
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, LSL, r2));
- break;
- default:
- UNREACHABLE();
- }
-
- // Check if the result fits in a smi. If not try to return a heap number.
- // (We know the result is an int32).
- __ TrySmiTag(r0, r2, &return_heap_number);
- __ Ret();
-
- __ bind(&return_heap_number);
- heap_number_result = r5;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime,
- mode_);
-
- if (op_ != Token::SHR) {
- // Convert the result to a floating point value.
- __ vmov(double_scratch.low(), r2);
- __ vcvt_f64_s32(double_scratch, double_scratch.low());
- } else {
- // The result must be interpreted as an unsigned 32-bit integer.
- __ vmov(double_scratch.low(), r2);
- __ vcvt_f64_u32(double_scratch, double_scratch.low());
- }
-
- // Store the result.
- __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
- __ vstr(double_scratch, r0, HeapNumber::kValueOffset);
- __ mov(r0, heap_number_result);
- __ Ret();
-
- break;
- }
-
- default:
- UNREACHABLE();
- }
-
- // We never expect DIV to yield an integer result, so we always generate
- // type transition code for DIV operations expecting an integer result: the
- // code will fall through to this type transition.
- if (transition.is_linked() ||
- ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
- __ bind(&transition);
- GenerateTypeTransition(masm);
- }
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- Label call_runtime;
-
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateAddStrings(masm);
- }
-
- // Convert oddball arguments to numbers.
- Label check, done;
- __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
- __ b(ne, &check);
- if (Token::IsBitOp(op_)) {
- __ mov(r1, Operand(Smi::FromInt(0)));
- } else {
- __ LoadRoot(r1, Heap::kNanValueRootIndex);
- }
- __ jmp(&done);
- __ bind(&check);
- __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
- __ b(ne, &done);
- if (Token::IsBitOp(op_)) {
- __ mov(r0, Operand(Smi::FromInt(0)));
- } else {
- __ LoadRoot(r0, Heap::kNanValueRootIndex);
- }
- __ bind(&done);
-
- GenerateNumberStub(masm);
-}
-
-
-void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- Label call_runtime, transition;
- BinaryOpStub_GenerateFPOperation(
- masm, left_type_, right_type_, false,
- &transition, &call_runtime, &transition, op_, mode_, r6, r4, r5, r9);
-
- __ bind(&transition);
- GenerateTypeTransition(masm);
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime, transition;
-
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_,
- r5, r6, r4, r9);
-
- BinaryOpStub_GenerateFPOperation(
- masm, left_type_, right_type_, false,
- &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_, r6,
- r4, r5, r9);
-
- __ bind(&transition);
- GenerateTypeTransition(masm);
-
- __ bind(&call_string_add_or_runtime);
- if (op_ == Token::ADD) {
- GenerateAddStrings(masm);
- }
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- Label left_not_string, call_runtime;
-
- Register left = r1;
- Register right = r0;
-
- // Check if left argument is a string.
- __ JumpIfSmi(left, &left_not_string);
- __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &left_not_string);
-
- StringAddStub string_add_left_stub(
- (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime);
- __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &call_runtime);
-
- StringAddStub string_add_right_stub(
- (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // At least one argument is not a string.
- __ bind(&call_runtime);
-}
-
-
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- OverwriteMode mode) {
- // Code below will scratch result if allocation fails. To keep both arguments
- // intact for the runtime call, result cannot be one of these.
- ASSERT(!result.is(r0) && !result.is(r1));
-
- if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
- Label skip_allocation, allocated;
- Register overwritable_operand = mode == OVERWRITE_LEFT ? r1 : r0;
- // If the overwritable operand is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
- // Allocate a heap number for the result.
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- __ b(&allocated);
- __ bind(&skip_allocation);
- // Use object holding the overwritable operand for result.
- __ mov(result, Operand(overwritable_operand));
- __ bind(&allocated);
- } else {
- ASSERT(mode == NO_OVERWRITE);
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- }
-}
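To illustrate the overwrite modes handled above (my reading of this code, stated informally): in an expression like (a * b) + c the product is a fresh temporary, so the addition can be compiled with an overwrite mode and write its result into the temporary's box.

    // mode == OVERWRITE_LEFT  -> try to reuse the HeapNumber held in r1
    // mode == OVERWRITE_RIGHT -> try to reuse the HeapNumber held in r0
    // mode == NO_OVERWRITE    -> always allocate a fresh HeapNumber
    // A smi operand has no box to reuse, hence the JumpIfNotSmi guard.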
-
-
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ Push(r1, r0);
-}
-
-
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Untagged case: double input in d2, double result goes
// into d2.
@@ -2614,6 +1639,7 @@
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ BinaryOpStub::GenerateAheadOfTime(isolate);
}
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index ea3fa00..af7f6af 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -834,26 +834,31 @@
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
ZoneList<Handle<Map> > maps(1, zone());
+ ZoneList<Handle<JSObject> > objects(1, zone());
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT &&
- it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- if (map->CanTransition()) {
+ if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
+ if (it.rinfo()->target_object()->IsMap()) {
+ Handle<Map> map(Map::cast(it.rinfo()->target_object()));
maps.Add(map, zone());
+ } else if (it.rinfo()->target_object()->IsJSObject()) {
+ Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
+ objects.Add(object, zone());
}
}
}
#ifdef VERIFY_HEAP
- // This disables verification of weak embedded maps after full GC.
+ // This disables verification of weak embedded objects after full GC.
// AddDependentCode can cause a GC, which would observe the state where
// this code is not yet in the depended code lists of the embedded maps.
- NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
+ NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
#endif
for (int i = 0; i < maps.length(); i++) {
maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
}
+ for (int i = 0; i < objects.length(); i++) {
+ AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
+ }
}
diff --git a/src/ast.h b/src/ast.h
index 9c6f213..7432b0b 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1968,7 +1968,7 @@
virtual KeyedAccessStoreMode GetStoreMode() V8_OVERRIDE {
return store_mode_;
}
- TypeInfo type() const { return type_; }
+ Handle<Type> type() const { return type_; }
BailoutId AssignmentId() const { return assignment_id_; }
@@ -1997,7 +1997,7 @@
bool is_monomorphic_ : 1;
KeyedAccessStoreMode store_mode_ : 5; // Windows treats as signed,
// must have extra bit.
- TypeInfo type_;
+ Handle<Type> type_;
Expression* expression_;
int pos_;
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index 64a6ded..456ecc0 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -851,6 +851,102 @@
template <>
+HValue* CodeStubGraphBuilder<BinaryOpStub>::BuildCodeInitializedStub() {
+ BinaryOpStub* stub = casted_stub();
+ HValue* left = GetParameter(0);
+ HValue* right = GetParameter(1);
+
+ Handle<Type> left_type = stub->GetLeftType(isolate());
+ Handle<Type> right_type = stub->GetRightType(isolate());
+ Handle<Type> result_type = stub->GetResultType(isolate());
+
+ ASSERT(!left_type->Is(Type::None()) && !right_type->Is(Type::None()) &&
+ (stub->HasSideEffects(isolate()) || !result_type->Is(Type::None())));
+
+ HValue* result = NULL;
+ if (stub->operation() == Token::ADD &&
+ (left_type->Maybe(Type::String()) || right_type->Maybe(Type::String())) &&
+ !left_type->Is(Type::String()) && !right_type->Is(Type::String())) {
+ // For the generic add stub a fast case for String add is performance
+ // critical.
+ if (left_type->Maybe(Type::String())) {
+ IfBuilder left_string(this);
+ left_string.IfNot<HIsSmiAndBranch>(left);
+ left_string.AndIf<HIsStringAndBranch>(left);
+ left_string.Then();
+ Push(Add<HStringAdd>(left, right, STRING_ADD_CHECK_RIGHT));
+ left_string.Else();
+ Push(AddInstruction(BuildBinaryOperation(stub->operation(),
+ left, right, left_type, right_type, result_type,
+ stub->fixed_right_arg(), true)));
+ left_string.End();
+ result = Pop();
+ } else {
+ IfBuilder right_string(this);
+ right_string.IfNot<HIsSmiAndBranch>(right);
+ right_string.AndIf<HIsStringAndBranch>(right);
+ right_string.Then();
+ Push(Add<HStringAdd>(left, right, STRING_ADD_CHECK_LEFT));
+ right_string.Else();
+ Push(AddInstruction(BuildBinaryOperation(stub->operation(),
+ left, right, left_type, right_type, result_type,
+ stub->fixed_right_arg(), true)));
+ right_string.End();
+ result = Pop();
+ }
+ } else {
+ result = AddInstruction(BuildBinaryOperation(stub->operation(),
+ left, right, left_type, right_type, result_type,
+ stub->fixed_right_arg(), true));
+ }
+
+ // If we encounter a generic argument, the number conversion is
+ // observable, thus we cannot afford to bail out after the fact.
+ if (!stub->HasSideEffects(isolate())) {
+ if (result_type->Is(Type::Smi())) {
+ if (stub->operation() == Token::SHR) {
+ // TODO(olivf) Replace this by a SmiTagU instruction.
+ // 0x40000000: this number would convert to a negative value when
+ // interpreting the register as a signed value.
+ IfBuilder if_of(this);
+ if_of.IfNot<HCompareNumericAndBranch>(result,
+ Add<HConstant>(static_cast<int>(SmiValuesAre32Bits()
+ ? 0x80000000 : 0x40000000)), Token::EQ_STRICT);
+ if_of.Then();
+ if_of.ElseDeopt("UInt->Smi overflow");
+ if_of.End();
+ }
+ }
+ result = EnforceNumberType(result, result_type);
+ }
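The 0x40000000 bound above is the first unsigned value that no longer fits a 31-bit smi (whose maximum is 2^30 - 1); with 32-bit smis the bound moves to 2^31. A hedged helper expressing the same limit:

    // Illustration only: can an unsigned SHR result be tagged as a smi?
    bool UnsignedFitsSmi(uint32_t value) {
      uint32_t limit = SmiValuesAre32Bits() ? 0x80000000u : 0x40000000u;
      return value < limit;  // at the limit, tagging would flip the sign
    }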
+
+ // Reuse the double box if we are allowed to (i.e. chained binops).
+ if (stub->CanReuseDoubleBox()) {
+ HValue* reuse = (stub->mode() == OVERWRITE_LEFT) ? left : right;
+ IfBuilder if_heap_number(this);
+ if_heap_number.IfNot<HIsSmiAndBranch>(reuse);
+ if_heap_number.Then();
+ HValue* res_val = Add<HForceRepresentation>(result,
+ Representation::Double());
+ HObjectAccess access = HObjectAccess::ForHeapNumberValue();
+ Add<HStoreNamedField>(reuse, access, res_val);
+ Push(reuse);
+ if_heap_number.Else();
+ Push(result);
+ if_heap_number.End();
+ result = Pop();
+ }
+
+ return result;
+}
+
+
+Handle<Code> BinaryOpStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template <>
HValue* CodeStubGraphBuilder<ToBooleanStub>::BuildCodeInitializedStub() {
ToBooleanStub* stub = casted_stub();
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index ace4af4..75b8807 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -137,6 +137,7 @@
? FindCodeInSpecialCache(&code, isolate)
: FindCodeInCache(&code, isolate)) {
ASSERT(IsPregenerated(isolate) == code->is_pregenerated());
+ ASSERT(GetCodeKind() == code->kind());
return Handle<Code>(code);
}
@@ -203,119 +204,476 @@
}
-void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
- BinaryOpIC::TypeInfo operands_type = Max(left_type_, right_type_);
- if (left_type_ == BinaryOpIC::ODDBALL && right_type_ == BinaryOpIC::ODDBALL) {
- // The OddballStub handles a number and an oddball, not two oddballs.
- operands_type = BinaryOpIC::GENERIC;
- }
- switch (operands_type) {
- case BinaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case BinaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case BinaryOpIC::INT32:
- GenerateInt32Stub(masm);
- break;
- case BinaryOpIC::NUMBER:
- GenerateNumberStub(masm);
- break;
- case BinaryOpIC::ODDBALL:
- GenerateOddballStub(masm);
- break;
- case BinaryOpIC::STRING:
- GenerateStringStub(masm);
- break;
- case BinaryOpIC::GENERIC:
- GenerateGeneric(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-
-void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
- switch (op_) {
- case Token::ADD:
- __ InvokeBuiltin(Builtins::ADD, CALL_FUNCTION);
- break;
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, CALL_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, CALL_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, CALL_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, CALL_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, CALL_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, CALL_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, CALL_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, CALL_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, CALL_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, CALL_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-#undef __
-
-
-void BinaryOpStub::PrintName(StringStream* stream) {
+void BinaryOpStub::PrintBaseName(StringStream* stream) {
const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
- stream->Add("BinaryOpStub_%s_%s_%s+%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(left_type_),
- BinaryOpIC::GetName(right_type_));
+ const char* ovr = "";
+ if (mode_ == OVERWRITE_LEFT) ovr = "_ReuseLeft";
+ if (mode_ == OVERWRITE_RIGHT) ovr = "_ReuseRight";
+ stream->Add("BinaryOpStub_%s%s", op_name, ovr);
}
-void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(left_type_ == BinaryOpIC::STRING || right_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- if (left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING) {
- GenerateBothStringStub(masm);
- return;
+void BinaryOpStub::PrintState(StringStream* stream) {
+ stream->Add("(");
+ stream->Add(StateToName(left_state_));
+ stream->Add("*");
+ if (fixed_right_arg_.has_value) {
+ stream->Add("%d", fixed_right_arg_.value);
+ } else {
+ stream->Add(StateToName(right_state_));
}
- // Try to add arguments as strings, otherwise, transition to the generic
- // BinaryOpIC type.
- GenerateAddStrings(masm);
- GenerateTypeTransition(masm);
+ stream->Add("->");
+ stream->Add(StateToName(result_state_));
+ stream->Add(")");
+}
+
+
+Maybe<Handle<Object> > BinaryOpStub::Result(Handle<Object> left,
+ Handle<Object> right,
+ Isolate* isolate) {
+ Handle<JSBuiltinsObject> builtins(isolate->js_builtins_object());
+ Builtins::JavaScript func = BinaryOpIC::TokenToJSBuiltin(op_);
+ Object* builtin = builtins->javascript_builtin(func);
+ Handle<JSFunction> builtin_function =
+ Handle<JSFunction>(JSFunction::cast(builtin), isolate);
+ bool caught_exception;
+ Handle<Object> result = Execution::Call(isolate, builtin_function, left,
+ 1, &right, &caught_exception);
+ return Maybe<Handle<Object> >(!caught_exception, result);
+}
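A usage sketch (an assumption about the intended call pattern, based on UpdateStatus further below): the wrapped handle is only meaningful when no exception was thrown during the builtin call.

    Maybe<Handle<Object> > result = stub->Result(left, right, isolate);
    if (result.has_value) {
      stub->UpdateStatus(left, right, result);  // record observed types
    }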
+
+
+void BinaryOpStub::Initialize() {
+ fixed_right_arg_.has_value = false;
+ left_state_ = right_state_ = result_state_ = NONE;
+}
+
+
+void BinaryOpStub::Generate(Token::Value op,
+ State left,
+ State right,
+ State result,
+ OverwriteMode mode,
+ Isolate* isolate) {
+ BinaryOpStub stub(INITIALIZED);
+ stub.op_ = op;
+ stub.left_state_ = left;
+ stub.right_state_ = right;
+ stub.result_state_ = result;
+ stub.mode_ = mode;
+ stub.GetCode(isolate);
+}
+
+
+void BinaryOpStub::Generate(Token::Value op,
+ State left,
+ int right,
+ State result,
+ OverwriteMode mode,
+ Isolate* isolate) {
+ BinaryOpStub stub(INITIALIZED);
+ stub.op_ = op;
+ stub.left_state_ = left;
+ stub.fixed_right_arg_.has_value = true;
+ stub.fixed_right_arg_.value = right;
+ stub.right_state_ = SMI;
+ stub.result_state_ = result;
+ stub.mode_ = mode;
+ stub.GetCode(isolate);
+}
+
+
+void BinaryOpStub::GenerateAheadOfTime(Isolate* isolate) {
+ Token::Value binop[] = {Token::SUB, Token::MOD, Token::DIV, Token::MUL,
+ Token::ADD, Token::SAR, Token::BIT_OR, Token::BIT_AND,
+ Token::BIT_XOR, Token::SHL, Token::SHR};
+ for (unsigned i = 0; i < ARRAY_SIZE(binop); i++) {
+ BinaryOpStub stub(UNINITIALIZED);
+ stub.op_ = binop[i];
+ stub.GetCode(isolate);
+ }
+
+ // TODO(olivf) We should investigate why adding stubs to the snapshot is so
+ // expensive at runtime. When solved we should be able to add most binops to
+ // the snapshot instead of hand-picking them.
+ // Generated list of commonly used stubs
+ Generate(Token::ADD, GENERIC, STRING, STRING, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, GENERIC, STRING, STRING, OVERWRITE_RIGHT, isolate);
+ Generate(Token::ADD, INT32, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, INT32, INT32, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, INT32, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, INT32, INT32, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, INT32, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::ADD, INT32, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, INT32, SMI, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::ADD, NUMBER, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::ADD, NUMBER, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::ADD, NUMBER, SMI, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::ADD, SMI, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, SMI, INT32, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, SMI, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, SMI, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::ADD, SMI, SMI, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::ADD, STRING, GENERIC, STRING, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, STRING, GENERIC, STRING, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, STRING, GENERIC, STRING, OVERWRITE_RIGHT, isolate);
+ Generate(Token::ADD, STRING, STRING, STRING, NO_OVERWRITE, isolate);
+ Generate(Token::ADD, STRING, STRING, STRING, OVERWRITE_LEFT, isolate);
+ Generate(Token::ADD, STRING, STRING, STRING, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_AND, INT32, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_AND, INT32, INT32, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_AND, INT32, INT32, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_AND, INT32, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_AND, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_AND, INT32, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_AND, NUMBER, INT32, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_AND, NUMBER, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_AND, NUMBER, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_AND, SMI, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_AND, SMI, INT32, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_AND, SMI, NUMBER, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_AND, SMI, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_OR, INT32, INT32, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_OR, INT32, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_OR, INT32, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_OR, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_OR, NUMBER, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_OR, NUMBER, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_OR, NUMBER, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_OR, SMI, INT32, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_XOR, INT32, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_XOR, INT32, INT32, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_XOR, INT32, INT32, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_XOR, INT32, NUMBER, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_XOR, INT32, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::BIT_XOR, NUMBER, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_XOR, NUMBER, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_XOR, NUMBER, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_XOR, SMI, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_XOR, SMI, INT32, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_XOR, SMI, INT32, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_XOR, SMI, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::DIV, INT32, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, INT32, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, INT32, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, INT32, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::DIV, INT32, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, INT32, SMI, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, NUMBER, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, NUMBER, INT32, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::DIV, NUMBER, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::DIV, NUMBER, SMI, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::DIV, SMI, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, SMI, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, SMI, INT32, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::DIV, SMI, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::DIV, SMI, SMI, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::DIV, SMI, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::DIV, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::DIV, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::MOD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::MOD, SMI, 16, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::MOD, SMI, 2, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::MOD, SMI, 2048, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::MOD, SMI, 32, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::MOD, SMI, 4, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::MOD, SMI, 4, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::MOD, SMI, 8, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::MOD, SMI, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::MOD, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, INT32, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, INT32, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, INT32, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, INT32, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, INT32, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, INT32, SMI, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, INT32, SMI, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, NUMBER, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::MUL, NUMBER, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, NUMBER, SMI, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::MUL, SMI, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, SMI, INT32, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, SMI, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, SMI, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::MUL, SMI, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, SMI, SMI, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, SMI, SMI, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, SMI, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::MUL, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::MUL, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SAR, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SAR, INT32, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::SAR, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SAR, NUMBER, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::SAR, NUMBER, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SAR, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::SAR, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SHL, INT32, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::SHL, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SHL, INT32, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::SHL, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SHL, NUMBER, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SHL, SMI, SMI, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::SHL, SMI, SMI, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::SHL, SMI, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SHL, SMI, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::SHL, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::SHL, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SHR, INT32, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::SHR, INT32, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::SHR, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SHR, NUMBER, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::SHR, NUMBER, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::SHR, SMI, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::SHR, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::SHR, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SUB, INT32, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::SUB, INT32, INT32, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::SUB, INT32, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::SUB, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SUB, INT32, SMI, INT32, OVERWRITE_LEFT, isolate);
+ Generate(Token::SUB, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SUB, NUMBER, INT32, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::SUB, NUMBER, INT32, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::SUB, NUMBER, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SUB, NUMBER, SMI, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SUB, SMI, INT32, INT32, NO_OVERWRITE, isolate);
+ Generate(Token::SUB, SMI, NUMBER, NUMBER, NO_OVERWRITE, isolate);
+ Generate(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_LEFT, isolate);
+ Generate(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate);
+ Generate(Token::SUB, SMI, SMI, SMI, NO_OVERWRITE, isolate);
+ Generate(Token::SUB, SMI, SMI, SMI, OVERWRITE_LEFT, isolate);
+ Generate(Token::SUB, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate);
+}
+
+
+bool BinaryOpStub::can_encode_arg_value(int32_t value) const {
+ return op_ == Token::MOD && value > 0 && IsPowerOf2(value) &&
+ FixedRightArgValueBits::is_valid(WhichPowerOf2(value));
+}
+
+
+int BinaryOpStub::encode_arg_value(int32_t value) const {
+ ASSERT(can_encode_arg_value(value));
+ return WhichPowerOf2(value);
+}
+
+
+int32_t BinaryOpStub::decode_arg_value(int value) const {
+ return 1 << value;
+}
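A round-trip sketch (illustration): a fixed right argument like 2048 is stored as its exponent and reconstructed with a shift.

    int32_t value = 2048;                 // power-of-two MOD divisor
    int encoded = WhichPowerOf2(value);   // 11
    int32_t decoded = 1 << encoded;       // 2048 again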
+
+
+int BinaryOpStub::encode_token(Token::Value op) const {
+ ASSERT(op >= FIRST_TOKEN && op <= LAST_TOKEN);
+ return op - FIRST_TOKEN;
+}
+
+
+Token::Value BinaryOpStub::decode_token(int op) const {
+ int res = op + FIRST_TOKEN;
+ ASSERT(res >= FIRST_TOKEN && res <= LAST_TOKEN);
+ return static_cast<Token::Value>(res);
+}
+
+
+const char* BinaryOpStub::StateToName(State state) {
+ switch (state) {
+ case NONE:
+ return "None";
+ case SMI:
+ return "Smi";
+ case INT32:
+ return "Int32";
+ case NUMBER:
+ return "Number";
+ case STRING:
+ return "String";
+ case GENERIC:
+ return "Generic";
+ }
+ return "";
+}
+
+
+void BinaryOpStub::UpdateStatus(Handle<Object> left,
+ Handle<Object> right,
+ Maybe<Handle<Object> > result) {
+ int old_state = GetExtraICState();
+
+ UpdateStatus(left, &left_state_);
+ UpdateStatus(right, &right_state_);
+
+ int32_t value;
+ bool new_has_fixed_right_arg =
+ right->ToInt32(&value) && can_encode_arg_value(value) &&
+ (left_state_ == SMI || left_state_ == INT32) &&
+ (result_state_ == NONE || !fixed_right_arg_.has_value);
+
+ fixed_right_arg_ = Maybe<int32_t>(new_has_fixed_right_arg, value);
+
+ if (result.has_value) UpdateStatus(result.value, &result_state_);
+
+ State max_input = Max(left_state_, right_state_);
+
+ // Avoid unnecessary Representation changes.
+ if (left_state_ == STRING && right_state_ < STRING) {
+ right_state_ = GENERIC;
+ } else if (right_state_ == STRING && left_state_ < STRING) {
+ left_state_ = GENERIC;
+ } else if ((right_state_ == GENERIC && left_state_ != STRING) ||
+ (left_state_ == GENERIC && right_state_ != STRING)) {
+ left_state_ = right_state_ = GENERIC;
+ } else if (!has_int_result() && op_ != Token::SHR &&
+ max_input <= NUMBER && max_input > result_state_) {
+ result_state_ = max_input;
+ }
+
+ ASSERT(result_state_ <= (has_int_result() ? INT32 : NUMBER) ||
+ op_ == Token::ADD);
+
+ if (old_state == GetExtraICState()) {
+ // Since the fpu is too precise, we might bail out on numbers which
+ // actually would truncate with 64 bit precision.
+ ASSERT(!CpuFeatures::IsSupported(SSE2) &&
+ result_state_ <= INT32);
+ result_state_ = NUMBER;
+ }
+}
+
+
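The merging logic above leans on the State enumerators being declared in increasing order of generality (see the enum later in code-stubs.h), so Max() always picks the more general operand state; a short sketch of that assumption:

    // Mirrors the private enum declared later in code-stubs.h:
    enum State { NONE, SMI, INT32, NUMBER, STRING, GENERIC };
    // Max(SMI, NUMBER) == NUMBER, Max(STRING, GENERIC) == GENERIC.
    // The explicit STRING/GENERIC branches above exist because string and
    // numeric states have incompatible representations, so mixed inputs
    // are widened straight to GENERIC instead of merged numerically.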
+void BinaryOpStub::UpdateStatus(Handle<Object> object,
+ State* state) {
+ v8::internal::TypeInfo type = v8::internal::TypeInfo::FromValue(object);
+ if (object->IsUndefined()) {
+ // Undefined will be automatically truncated for us by HChange.
+ type = (op_ == Token::BIT_AND || op_ == Token::BIT_OR ||
+ op_ == Token::BIT_XOR || op_ == Token::SAR ||
+ op_ == Token::SHL || op_ == Token::SHR)
+ ? TypeInfo::Integer32()
+ : TypeInfo::Double();
+ }
+ State int_state = SmiValuesAre32Bits() ? NUMBER : INT32;
+ State new_state = NONE;
+ if (type.IsSmi()) {
+ new_state = SMI;
+ } else if (type.IsInteger32()) {
+ new_state = int_state;
+ } else if (type.IsNumber()) {
+ new_state = NUMBER;
+ } else if (object->IsString() && operation() == Token::ADD) {
+ new_state = STRING;
+ } else {
+ new_state = GENERIC;
+ }
+ if ((new_state <= NUMBER && *state > NUMBER) ||
+ (new_state > NUMBER && *state <= NUMBER && *state != NONE)) {
+ new_state = GENERIC;
+ }
+ *state = Max(*state, new_state);
+}
+
+
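Hedged examples of how this classification maps concrete values to states (on targets where SmiValuesAre32Bits(), int_state folds Integer32 feedback into NUMBER):

    // 42                                   -> SMI
    // 1 << 30 (Integer32, not a smi on 32-bit targets) -> int_state
    // 0.5                                  -> NUMBER
    // "a" when operation() == Token::ADD   -> STRING
    // undefined under a bitwise op         -> Integer32, hence int_state
    // any other object                     -> GENERIC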
+Handle<Type> BinaryOpStub::StateToType(State state,
+ Isolate* isolate) {
+ Handle<Type> t = handle(Type::None(), isolate);
+ switch (state) {
+ case NUMBER:
+ t = handle(Type::Union(t, handle(Type::Double(), isolate)), isolate);
+ // Fall through.
+ case INT32:
+ t = handle(Type::Union(t, handle(Type::Signed32(), isolate)), isolate);
+ // Fall through.
+ case SMI:
+ t = handle(Type::Union(t, handle(Type::Smi(), isolate)), isolate);
+ break;
+
+ case STRING:
+ t = handle(Type::Union(t, handle(Type::String(), isolate)), isolate);
+ break;
+ case GENERIC:
+ return handle(Type::Any(), isolate);
+ case NONE:
+ break;
+ }
+ return t;
+}
+
+
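Spelled out, the fall-through above accumulates unions (shorthand; '|' denotes Type::Union):

    // StateToType(SMI)     == Smi
    // StateToType(INT32)   == Signed32 | Smi
    // StateToType(NUMBER)  == Double | Signed32 | Smi
    // StateToType(STRING)  == String
    // StateToType(GENERIC) == Any
    // StateToType(NONE)    == None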
+Handle<Type> BinaryOpStub::GetLeftType(Isolate* isolate) const {
+ return StateToType(left_state_, isolate);
+}
+
+
+Handle<Type> BinaryOpStub::GetRightType(Isolate* isolate) const {
+ return StateToType(right_state_, isolate);
+}
+
+
+Handle<Type> BinaryOpStub::GetResultType(Isolate* isolate) const {
+ if (HasSideEffects(isolate)) return StateToType(NONE, isolate);
+ if (result_state_ == GENERIC && op_ == Token::ADD) {
+ return handle(Type::Union(handle(Type::Number(), isolate),
+ handle(Type::String(), isolate)), isolate);
+ }
+ ASSERT(result_state_ != GENERIC);
+ if (result_state_ == NUMBER && op_ == Token::SHR) {
+ return handle(Type::Unsigned32(), isolate);
+ }
+ return StateToType(result_state_, isolate);
}
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 0d0e2b8..9b25319 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -996,156 +996,173 @@
};
-class BinaryOpStub: public PlatformCodeStub {
+class BinaryOpStub: public HydrogenCodeStub {
public:
BinaryOpStub(Token::Value op, OverwriteMode mode)
- : op_(op),
- mode_(mode),
- platform_specific_bit_(false),
- left_type_(BinaryOpIC::UNINITIALIZED),
- right_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED),
- encoded_right_arg_(false, encode_arg_value(1)) {
+ : HydrogenCodeStub(UNINITIALIZED), op_(op), mode_(mode) {
+ ASSERT(op <= LAST_TOKEN && op >= FIRST_TOKEN);
Initialize();
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
- BinaryOpStub(
- int key,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- BinaryOpIC::TypeInfo result_type,
- Maybe<int32_t> fixed_right_arg)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- platform_specific_bit_(PlatformSpecificBits::decode(key)),
- left_type_(left_type),
- right_type_(right_type),
- result_type_(result_type),
- encoded_right_arg_(fixed_right_arg.has_value,
- encode_arg_value(fixed_right_arg.value)) { }
-
- static void decode_types_from_minor_key(int minor_key,
- BinaryOpIC::TypeInfo* left_type,
- BinaryOpIC::TypeInfo* right_type,
- BinaryOpIC::TypeInfo* result_type) {
- *left_type =
- static_cast<BinaryOpIC::TypeInfo>(LeftTypeBits::decode(minor_key));
- *right_type =
- static_cast<BinaryOpIC::TypeInfo>(RightTypeBits::decode(minor_key));
- *result_type =
- static_cast<BinaryOpIC::TypeInfo>(ResultTypeBits::decode(minor_key));
+ explicit BinaryOpStub(Code::ExtraICState state)
+ : op_(decode_token(OpBits::decode(state))),
+ mode_(OverwriteModeField::decode(state)),
+ fixed_right_arg_(
+ Maybe<int>(HasFixedRightArgBits::decode(state),
+ decode_arg_value(FixedRightArgValueBits::decode(state)))),
+ left_state_(LeftStateField::decode(state)),
+ right_state_(fixed_right_arg_.has_value
+ ? ((fixed_right_arg_.value <= Smi::kMaxValue) ? SMI : INT32)
+ : RightStateField::decode(state)),
+ result_state_(ResultStateField::decode(state)) {
+ // We don't deserialize the SSE2Field, since it is only used so that both
+ // SSE2 and non-SSE2 versions can be included in the snapshot. For code
+ // generation we always want it to reflect the current CPU state.
+ ASSERT(!fixed_right_arg_.has_value ||
+ can_encode_arg_value(fixed_right_arg_.value));
}
- static Token::Value decode_op_from_minor_key(int minor_key) {
- return static_cast<Token::Value>(OpBits::decode(minor_key));
+ static const int FIRST_TOKEN = Token::BIT_OR;
+ static const int LAST_TOKEN = Token::MOD;
+
+ static void GenerateAheadOfTime(Isolate* isolate);
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate, CodeStubInterfaceDescriptor* descriptor);
+ static void InitializeForIsolate(Isolate* isolate) {
+ BinaryOpStub binop_stub(UNINITIALIZED);
+ binop_stub.InitializeInterfaceDescriptor(
+ isolate, isolate->code_stub_interface_descriptor(CodeStub::BinaryOp));
}
- static Maybe<int> decode_fixed_right_arg_from_minor_key(int minor_key) {
- return Maybe<int>(
- HasFixedRightArgBits::decode(minor_key),
- decode_arg_value(FixedRightArgValueBits::decode(minor_key)));
- }
-
- int fixed_right_arg_value() const {
- return decode_arg_value(encoded_right_arg_.value);
- }
-
- static bool can_encode_arg_value(int32_t value) {
- return value > 0 &&
- IsPowerOf2(value) &&
- FixedRightArgValueBits::is_valid(WhichPowerOf2(value));
- }
-
- enum SmiCodeGenerateHeapNumberResults {
- ALLOW_HEAPNUMBER_RESULTS,
- NO_HEAPNUMBER_RESULTS
- };
-
- private:
- Token::Value op_;
- OverwriteMode mode_;
- bool platform_specific_bit_; // Indicates SSE3 on IA32.
-
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo left_type_;
- BinaryOpIC::TypeInfo right_type_;
- BinaryOpIC::TypeInfo result_type_;
-
- Maybe<int> encoded_right_arg_;
-
- static int encode_arg_value(int32_t value) {
- ASSERT(can_encode_arg_value(value));
- return WhichPowerOf2(value);
- }
-
- static int32_t decode_arg_value(int value) {
- return 1 << value;
- }
-
- virtual void PrintName(StringStream* stream);
-
- // Minor key encoding in all 25 bits FFFFFHTTTRRRLLLPOOOOOOOMM.
- // Note: We actually do not need 7 bits for the operation, just 4 bits to
- // encode ADD, SUB, MUL, DIV, MOD, BIT_OR, BIT_AND, BIT_XOR, SAR, SHL, SHR.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class PlatformSpecificBits: public BitField<bool, 9, 1> {};
- class LeftTypeBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
- class RightTypeBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
- class ResultTypeBits: public BitField<BinaryOpIC::TypeInfo, 16, 3> {};
- class HasFixedRightArgBits: public BitField<bool, 19, 1> {};
- class FixedRightArgValueBits: public BitField<int, 20, 5> {};
-
- Major MajorKey() { return BinaryOp; }
- int MinorKey() {
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | PlatformSpecificBits::encode(platform_specific_bit_)
- | LeftTypeBits::encode(left_type_)
- | RightTypeBits::encode(right_type_)
- | ResultTypeBits::encode(result_type_)
- | HasFixedRightArgBits::encode(encoded_right_arg_.has_value)
- | FixedRightArgValueBits::encode(encoded_right_arg_.value);
- }
-
-
- // Platform-independent implementation.
- void Generate(MacroAssembler* masm);
- void GenerateCallRuntime(MacroAssembler* masm);
-
- // Platform-independent signature, platform-specific implementation.
- void Initialize();
- void GenerateAddStrings(MacroAssembler* masm);
- void GenerateBothStringStub(MacroAssembler* masm);
- void GenerateGeneric(MacroAssembler* masm);
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateNumberStub(MacroAssembler* masm);
- void GenerateInt32Stub(MacroAssembler* masm);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateOddballStub(MacroAssembler* masm);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateStringStub(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
- void GenerateUninitializedStub(MacroAssembler* masm);
-
- // Entirely platform-specific methods are defined as static helper
- // functions in the <arch>/code-stubs-<arch>.cc files.
-
virtual Code::Kind GetCodeKind() const { return Code::BINARY_OP_IC; }
-
virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(Max(left_type_, right_type_));
+ if (Max(left_state_, right_state_) == NONE) {
+ return ::v8::internal::UNINITIALIZED;
+ }
+ if (Max(left_state_, right_state_) == GENERIC) return MEGAMORPHIC;
+ return MONOMORPHIC;
}
- virtual void FinishCode(Handle<Code> code) {
- code->set_stub_info(MinorKey());
+ virtual Code::ExtraICState GetExtraICState() {
+ bool sse_field = Max(result_state_, Max(left_state_, right_state_)) > SMI &&
+ CpuFeatures::IsSafeForSnapshot(SSE2);
+
+ return OpBits::encode(encode_token(op_))
+ | LeftStateField::encode(left_state_)
+ | RightStateField::encode(fixed_right_arg_.has_value
+ ? NONE : right_state_)
+ | ResultStateField::encode(result_state_)
+ | HasFixedRightArgBits::encode(fixed_right_arg_.has_value)
+ | FixedRightArgValueBits::encode(fixed_right_arg_.has_value
+ ? encode_arg_value(
+ fixed_right_arg_.value)
+ : 0)
+ | SSE2Field::encode(sse_field)
+ | OverwriteModeField::encode(mode_);
}
- friend class CodeGenerator;
+ bool CanReuseDoubleBox() {
+ return result_state_ <= NUMBER && result_state_ > SMI &&
+ ((left_state_ > SMI && left_state_ <= NUMBER &&
+ mode_ == OVERWRITE_LEFT) ||
+ (right_state_ > SMI && right_state_ <= NUMBER &&
+ mode_ == OVERWRITE_RIGHT));
+ }
+
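For example (a reading of CanReuseDoubleBox above, not new behavior): with mode() == OVERWRITE_LEFT, a left operand in state INT32 or NUMBER, and a non-smi numeric result, the left operand's HeapNumber box can hold the result, saving an allocation; smi-only and generic states never qualify.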
+ bool HasSideEffects(Isolate* isolate) const {
+ Handle<Type> left = GetLeftType(isolate);
+ Handle<Type> right = GetRightType(isolate);
+ return left->Maybe(Type::Receiver()) || right->Maybe(Type::Receiver());
+ }
+
+ virtual Handle<Code> GenerateCode(Isolate* isolate);
+
+ Maybe<Handle<Object> > Result(Handle<Object> left,
+ Handle<Object> right,
+ Isolate* isolate);
+
+ Token::Value operation() const { return op_; }
+ OverwriteMode mode() const { return mode_; }
+ Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
+
+ Handle<Type> GetLeftType(Isolate* isolate) const;
+ Handle<Type> GetRightType(Isolate* isolate) const;
+ Handle<Type> GetResultType(Isolate* isolate) const;
+
+ void UpdateStatus(Handle<Object> left,
+ Handle<Object> right,
+ Maybe<Handle<Object> > result);
+
+ void PrintState(StringStream* stream);
+
+ private:
+ explicit BinaryOpStub(InitializationState state) : HydrogenCodeStub(state),
+ op_(Token::ADD),
+ mode_(NO_OVERWRITE) {
+ Initialize();
+ }
+ void Initialize();
+
+ enum State { NONE, SMI, INT32, NUMBER, STRING, GENERIC };
+
+ // The op is stored relative to FIRST_TOKEN and must fit the 5-bit OpBits.
+ STATIC_ASSERT(LAST_TOKEN - FIRST_TOKEN < (1 << 5));
+ class LeftStateField: public BitField<State, 0, 3> {};
+ // When fixed right arg is set, we don't need to store the right state.
+ // Thus the two fields can overlap.
+ class HasFixedRightArgBits: public BitField<bool, 4, 1> {};
+ class FixedRightArgValueBits: public BitField<int, 5, 4> {};
+ class RightStateField: public BitField<State, 5, 3> {};
+ class ResultStateField: public BitField<State, 9, 3> {};
+ class SSE2Field: public BitField<bool, 12, 1> {};
+ class OverwriteModeField: public BitField<OverwriteMode, 13, 2> {};
+ class OpBits: public BitField<int, 15, 5> {};
+
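A minimal sketch of the BitField shape assumed above, and of why FixedRightArgValueBits and RightStateField may legally share bits from position 5 (standalone illustration, not part of the patch):

    #include <cstdint>

    template <class T, int shift, int size>
    struct BitFieldSketch {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t key) {
        return static_cast<T>((key & kMask) >> shift);
      }
    };

    // Only one interpretation of bits 5+ is ever decoded for a given key:
    // HasFixedRightArgBits (bit 4) selects between FixedRightArgValueBits
    // and RightStateField, so the overlap loses no information.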
+ virtual CodeStub::Major MajorKey() { return BinaryOp; }
+ virtual int NotMissMinorKey() { return GetExtraICState(); }
+
+ static Handle<Type> StateToType(State state,
+ Isolate* isolate);
+
+ static void Generate(Token::Value op,
+ State left,
+ int right,
+ State result,
+ OverwriteMode mode,
+ Isolate* isolate);
+
+ static void Generate(Token::Value op,
+ State left,
+ State right,
+ State result,
+ OverwriteMode mode,
+ Isolate* isolate);
+
+ void UpdateStatus(Handle<Object> object,
+ State* state);
+
+ bool can_encode_arg_value(int32_t value) const;
+ int encode_arg_value(int32_t value) const;
+ int32_t decode_arg_value(int value) const;
+ int encode_token(Token::Value op) const;
+ Token::Value decode_token(int op) const;
+
+ bool has_int_result() const {
+ return op_ == Token::BIT_XOR || op_ == Token::BIT_AND ||
+ op_ == Token::BIT_OR || op_ == Token::SAR || op_ == Token::SHL;
+ }
+
+ const char* StateToName(State state);
+
+ void PrintBaseName(StringStream* stream);
+
+ Token::Value op_;
+ OverwriteMode mode_;
+
+ Maybe<int> fixed_right_arg_;
+ State left_state_;
+ State right_state_;
+ State result_state_;
};
@@ -1718,7 +1735,9 @@
DestinationRegisterBits::encode(destination.code_) |
OffsetBits::encode(offset) |
IsTruncatingBits::encode(is_truncating) |
- SkipFastPathBits::encode(skip_fastpath);
+ SkipFastPathBits::encode(skip_fastpath) |
+ SSEBits::encode(CpuFeatures::IsSafeForSnapshot(SSE2) ?
+ CpuFeatures::IsSafeForSnapshot(SSE3) ? 2 : 1 : 0);
}
Register source() {
@@ -1761,6 +1780,8 @@
public BitField<int, 2 * kBitsPerRegisterNumber + 1, 3> {}; // NOLINT
class SkipFastPathBits:
public BitField<int, 2 * kBitsPerRegisterNumber + 4, 1> {}; // NOLINT
+ class SSEBits:
+ public BitField<int, 2 * kBitsPerRegisterNumber + 5, 2> {}; // NOLINT
Major MajorKey() { return DoubleToI; }
int MinorKey() { return bit_field_; }
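The nested ternary above packs the snapshot-safe SSE level into SSEBits; a standalone equivalent with the feature checks abstracted as booleans (hypothetical helper, not in the patch):

    static int SseLevel(bool sse2_safe, bool sse3_safe) {
      if (!sse2_safe) return 0;   // neither SSE2 nor SSE3
      return sse3_safe ? 2 : 1;   // 2: SSE2 and SSE3, 1: SSE2 only
    }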
diff --git a/src/defaults.cc b/src/defaults.cc
index eb9e0de..cbbe537 100644
--- a/src/defaults.cc
+++ b/src/defaults.cc
@@ -25,6 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// The GYP based build ends up defining USING_V8_SHARED when compiling this
+// file.
+#undef USING_V8_SHARED
#include "../include/v8-defaults.h"
#include "platform.h"
diff --git a/src/factory.cc b/src/factory.cc
index 1425552..116434c 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -126,6 +126,18 @@
}
+Handle<WeakHashTable> Factory::NewWeakHashTable(int at_least_space_for) {
+ ASSERT(0 <= at_least_space_for);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ WeakHashTable::Allocate(isolate()->heap(),
+ at_least_space_for,
+ WeakHashTable::USE_DEFAULT_MINIMUM_CAPACITY,
+ TENURED),
+ WeakHashTable);
+}
+
+
Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors,
int slack) {
ASSERT(0 <= number_of_descriptors);
@@ -598,8 +610,11 @@
Handle<FixedArray> Factory::CopySizeFixedArray(Handle<FixedArray> array,
- int new_length) {
- CALL_HEAP_FUNCTION(isolate(), array->CopySize(new_length), FixedArray);
+ int new_length,
+ PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(isolate(),
+ array->CopySize(new_length, pretenure),
+ FixedArray);
}
diff --git a/src/factory.h b/src/factory.h
index 5704066..a4f60bd 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -71,6 +71,8 @@
Handle<ObjectHashTable> NewObjectHashTable(int at_least_space_for);
+ Handle<WeakHashTable> NewWeakHashTable(int at_least_space_for);
+
Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors,
int slack = 0);
Handle<DeoptimizationInputData> NewDeoptimizationInputData(
@@ -265,7 +267,8 @@
Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
Handle<FixedArray> CopySizeFixedArray(Handle<FixedArray> array,
- int new_length);
+ int new_length,
+ PretenureFlag pretenure = NOT_TENURED);
Handle<FixedDoubleArray> CopyFixedDoubleArray(
Handle<FixedDoubleArray> array);
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index f6a8e84..e40ffe4 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -511,6 +511,8 @@
"garbage collect maps from which no objects can be reached")
DEFINE_bool(weak_embedded_maps_in_optimized_code, true,
"make maps embedded in optimized code weak")
+DEFINE_bool(weak_embedded_objects_in_optimized_code, false,
+ "make objects embedded in optimized code weak")
DEFINE_bool(flush_code, true,
"flush code that we expect not to use again (during full gc)")
DEFINE_bool(flush_code_incrementally, true,
diff --git a/src/handles.cc b/src/handles.cc
index 054dde3..20fe116 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -881,4 +881,15 @@
}
+void AddWeakObjectToCodeDependency(Heap* heap,
+ Handle<Object> object,
+ Handle<Code> code) {
+ heap->EnsureWeakObjectToCodeTable();
+ Handle<DependentCode> dep(heap->LookupWeakObjectToCodeDependency(*object));
+ dep = DependentCode::Insert(dep, DependentCode::kWeaklyEmbeddedGroup, code);
+ CALL_HEAP_FUNCTION_VOID(heap->isolate(),
+ heap->AddWeakObjectToCodeDependency(*object, *dep));
+}
+
+
} } // namespace v8::internal
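A standalone sketch (ordinary STL containers standing in for WeakHashTable and DependentCode; not V8 API) of the discipline in AddWeakObjectToCodeDependency above: fetch the key's current dependent-code list, insert, and write the possibly reallocated list back:

    #include <map>
    #include <vector>

    typedef std::map<const void*, std::vector<int> > Registry;  // obj -> code ids

    void AddDependency(Registry* registry, const void* object, int code_id) {
      std::vector<int>& deps = (*registry)[object];  // lazily creates the entry,
                                                     // like EnsureWeakObjectToCodeTable
      deps.push_back(code_id);                       // like DependentCode::Insert
    }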
diff --git a/src/handles.h b/src/handles.h
index 05b0c5e..c1400ed 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -321,6 +321,9 @@
Handle<Object> key,
Handle<Object> value);
+void AddWeakObjectToCodeDependency(Heap* heap,
+ Handle<Object> object,
+ Handle<Code> code);
// Seal off the current HandleScope so that new handles can only be created
// if a new HandleScope is entered.
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 8529406..e260746 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -846,15 +846,15 @@
#ifdef VERIFY_HEAP
-NoWeakEmbeddedMapsVerificationScope::NoWeakEmbeddedMapsVerificationScope() {
+NoWeakObjectVerificationScope::NoWeakObjectVerificationScope() {
Isolate* isolate = Isolate::Current();
- isolate->heap()->no_weak_embedded_maps_verification_scope_depth_++;
+ isolate->heap()->no_weak_object_verification_scope_depth_++;
}
-NoWeakEmbeddedMapsVerificationScope::~NoWeakEmbeddedMapsVerificationScope() {
+NoWeakObjectVerificationScope::~NoWeakObjectVerificationScope() {
Isolate* isolate = Isolate::Current();
- isolate->heap()->no_weak_embedded_maps_verification_scope_depth_--;
+ isolate->heap()->no_weak_object_verification_scope_depth_--;
}
#endif
diff --git a/src/heap.cc b/src/heap.cc
index c965efd..0a594e7 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -143,7 +143,7 @@
scavenges_since_last_idle_round_(kIdleScavengeThreshold),
gcs_since_last_deopt_(0),
#ifdef VERIFY_HEAP
- no_weak_embedded_maps_verification_scope_depth_(0),
+ no_weak_object_verification_scope_depth_(0),
#endif
promotion_queue_(this),
configured_(false),
@@ -6730,6 +6730,7 @@
native_contexts_list_ = undefined_value();
array_buffers_list_ = undefined_value();
allocation_sites_list_ = undefined_value();
+ weak_object_to_code_table_ = undefined_value();
return true;
}
@@ -6877,6 +6878,34 @@
}
+MaybeObject* Heap::AddWeakObjectToCodeDependency(Object* obj,
+ DependentCode* dep) {
+ ASSERT(!InNewSpace(obj));
+ ASSERT(!InNewSpace(dep));
+ MaybeObject* maybe_obj =
+ WeakHashTable::cast(weak_object_to_code_table_)->Put(obj, dep);
+ WeakHashTable* table;
+ if (!maybe_obj->To(&table)) return maybe_obj;
+ set_weak_object_to_code_table(table);
+ ASSERT_EQ(dep, WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj));
+ return weak_object_to_code_table_;
+}
+
+
+DependentCode* Heap::LookupWeakObjectToCodeDependency(Object* obj) {
+ Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
+ if (dep->IsDependentCode()) return DependentCode::cast(dep);
+ return DependentCode::cast(empty_fixed_array());
+}
+
+
+void Heap::EnsureWeakObjectToCodeTable() {
+ if (!weak_object_to_code_table()->IsHashTable()) {
+ set_weak_object_to_code_table(*isolate()->factory()->NewWeakHashTable(16));
+ }
+}
+
+
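EnsureWeakObjectToCodeTable above uses a sentinel-then-allocate idiom: the slot holds undefined_value until first use, and IsHashTable() distinguishes the sentinel from a real table. A hypothetical standalone rendering with a null pointer as the sentinel:

    struct LazyTableSlot {
      int* table;                             // NULL plays undefined_value
      LazyTableSlot() : table(0) {}
      void Ensure() {
        if (table == 0) table = new int[16];  // stands in for NewWeakHashTable(16)
      }
    };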
#ifdef DEBUG
class PrintHandleVisitor: public ObjectVisitor {
diff --git a/src/heap.h b/src/heap.h
index 3c92d45..f5b99d6 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1308,6 +1308,8 @@
Object* allocation_sites_list() { return allocation_sites_list_; }
Object** allocation_sites_list_address() { return &allocation_sites_list_; }
+ Object* weak_object_to_code_table() { return weak_object_to_code_table_; }
+
// Number of mark-sweeps.
unsigned int ms_count() { return ms_count_; }
@@ -1399,8 +1401,8 @@
void Verify();
- bool weak_embedded_maps_verification_enabled() {
- return no_weak_embedded_maps_verification_scope_depth_ == 0;
+ bool weak_embedded_objects_verification_enabled() {
+ return no_weak_object_verification_scope_depth_ == 0;
}
#endif
@@ -1853,6 +1855,16 @@
Heap* heap_;
};
+ MaybeObject* AddWeakObjectToCodeDependency(Object* obj, DependentCode* dep);
+
+ DependentCode* LookupWeakObjectToCodeDependency(Object* obj);
+
+ void InitializeWeakObjectToCodeTable() {
+ set_weak_object_to_code_table(undefined_value());
+ }
+
+ void EnsureWeakObjectToCodeTable();
+
private:
Heap();
@@ -1967,10 +1979,16 @@
bool old_gen_exhausted_;
// Weak list heads, threaded through the objects.
+ // List heads are initialized lazily and contain the undefined_value at start.
Object* native_contexts_list_;
Object* array_buffers_list_;
Object* allocation_sites_list_;
+ // WeakHashTable that maps objects embedded in optimized code to dependent
+ // code list. It is initialized lazily and contains the undefined_value at
+ // start.
+ Object* weak_object_to_code_table_;
+
StoreBufferRebuilder store_buffer_rebuilder_;
struct StringTypeTable {
@@ -2279,6 +2297,15 @@
void ClearObjectStats(bool clear_last_time_stats = false);
+ void set_weak_object_to_code_table(Object* value) {
+ ASSERT(!InNewSpace(value));
+ weak_object_to_code_table_ = value;
+ }
+
+ Object** weak_object_to_code_table_address() {
+ return &weak_object_to_code_table_;
+ }
+
static const int kInitialStringTableSize = 2048;
static const int kInitialEvalCacheSize = 64;
static const int kInitialNumberStringCacheSize = 256;
@@ -2334,7 +2361,7 @@
int gcs_since_last_deopt_;
#ifdef VERIFY_HEAP
- int no_weak_embedded_maps_verification_scope_depth_;
+ int no_weak_object_verification_scope_depth_;
#endif
static const int kMaxMarkSweepsInIdleRound = 7;
@@ -2368,7 +2395,7 @@
friend class MarkCompactMarkingVisitor;
friend class MapCompact;
#ifdef VERIFY_HEAP
- friend class NoWeakEmbeddedMapsVerificationScope;
+ friend class NoWeakObjectVerificationScope;
#endif
DISALLOW_COPY_AND_ASSIGN(Heap);
@@ -2433,10 +2460,10 @@
};
#ifdef VERIFY_HEAP
-class NoWeakEmbeddedMapsVerificationScope {
+class NoWeakObjectVerificationScope {
public:
- inline NoWeakEmbeddedMapsVerificationScope();
- inline ~NoWeakEmbeddedMapsVerificationScope();
+ inline NoWeakObjectVerificationScope();
+ inline ~NoWeakObjectVerificationScope();
};
#endif
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index c564872..ec99f9b 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -767,6 +767,9 @@
void SetFlag(Flag f) { flags_ |= (1 << f); }
void ClearFlag(Flag f) { flags_ &= ~(1 << f); }
bool CheckFlag(Flag f) const { return (flags_ & (1 << f)) != 0; }
+ void CopyFlag(Flag f, HValue* other) {
+ if (other->CheckFlag(f)) SetFlag(f);
+ }
// Returns true if the flag specified is set for all uses, false otherwise.
bool CheckUsesForFlag(Flag f) const;
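Note that CopyFlag only ever sets a flag, never clears one: if other lacks f, the receiver's existing setting is untouched. Its first use appears in the hydrogen.cc hunk below, propagating truncation flags from HForceRepresentation inputs onto the new instruction:

    // (from src/hydrogen.cc later in this patch)
    if (left->IsForceRepresentation()) {
      left->CopyFlag(HValue::kTruncatingToSmi, instr);
      left->CopyFlag(HValue::kTruncatingToInt32, instr);
    }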
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 0b25984..99687ca 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -7524,7 +7524,7 @@
bool returns_original_input,
CountOperation* expr) {
// The input to the count operation is on top of the expression stack.
- TypeInfo info = expr->type();
+ Handle<Type> info = expr->type();
Representation rep = Representation::FromType(info);
if (rep.IsNone() || rep.IsTagged()) {
rep = Representation::Smi();
@@ -7828,41 +7828,6 @@
return value;
}
- if (expected_obj->Is(Type::Null())) {
- *expected = handle(Type::Union(
- expected_number, handle(Type::Smi(), isolate())), isolate());
- IfBuilder if_null(this);
- if_null.If<HCompareObjectEqAndBranch>(value,
- graph()->GetConstantNull());
- if_null.Then();
- Push(graph()->GetConstant0());
- if_null.Else();
- Push(value);
- if_null.End();
- return Pop();
- }
-
- if (expected_obj->Is(Type::Boolean())) {
- *expected = handle(Type::Union(
- expected_number, handle(Type::Smi(), isolate())), isolate());
- IfBuilder if_true(this);
- if_true.If<HCompareObjectEqAndBranch>(value,
- graph()->GetConstantTrue());
- if_true.Then();
- Push(graph()->GetConstant1());
- if_true.Else();
- IfBuilder if_false(this);
- if_false.If<HCompareObjectEqAndBranch>(value,
- graph()->GetConstantFalse());
- if_false.Then();
- Push(graph()->GetConstant0());
- if_false.Else();
- Push(value);
- if_false.End();
- if_true.End();
- return Pop();
- }
-
return value;
}
@@ -7888,7 +7853,8 @@
Handle<Type> left_type,
Handle<Type> right_type,
Handle<Type> result_type,
- Maybe<int> fixed_right_arg) {
+ Maybe<int> fixed_right_arg,
+ bool binop_stub) {
Representation left_rep = Representation::FromType(left_type);
Representation right_rep = Representation::FromType(right_type);
@@ -7917,75 +7883,92 @@
right_rep = Representation::FromType(right_type);
}
+ if (binop_stub) {
+ left = EnforceNumberType(left, left_type);
+ right = EnforceNumberType(right, right_type);
+ }
+
Representation result_rep = Representation::FromType(result_type);
- bool is_string_add = op == Token::ADD &&
- (left_type->Is(Type::String()) ||
- right_type->Is(Type::String()));
+ bool is_non_primitive = (left_rep.IsTagged() && !left_rep.IsSmi()) ||
+ (right_rep.IsTagged() && !right_rep.IsSmi());
+ bool is_string_add = op == Token::ADD &&
+ (left_type->Is(Type::String()) ||
+ right_type->Is(Type::String()));
HInstruction* instr = NULL;
- switch (op) {
- case Token::ADD:
- if (is_string_add) {
- StringAddFlags flags = STRING_ADD_CHECK_BOTH;
- if (left_type->Is(Type::String())) {
- BuildCheckHeapObject(left);
- AddInstruction(HCheckInstanceType::NewIsString(left, zone()));
- flags = STRING_ADD_CHECK_RIGHT;
+ // Only the stub is allowed to call into the runtime, since otherwise we would
+ // inline several instructions (including the two pushes) for every tagged
+ // operation in optimized code, which is more expensive than a stub call.
+ if (binop_stub && is_non_primitive && !is_string_add) {
+ HValue* function = AddLoadJSBuiltin(BinaryOpIC::TokenToJSBuiltin(op));
+ Add<HPushArgument>(left);
+ Add<HPushArgument>(right);
+ instr = NewUncasted<HInvokeFunction>(function, 2);
+ } else {
+ switch (op) {
+ case Token::ADD:
+ if (is_string_add) {
+ StringAddFlags flags = STRING_ADD_CHECK_BOTH;
+ if (left_type->Is(Type::String())) {
+ BuildCheckHeapObject(left);
+ AddInstruction(HCheckInstanceType::NewIsString(left, zone()));
+ flags = STRING_ADD_CHECK_RIGHT;
+ }
+ if (right_type->Is(Type::String())) {
+ BuildCheckHeapObject(right);
+ AddInstruction(HCheckInstanceType::NewIsString(right, zone()));
+ flags = (flags == STRING_ADD_CHECK_BOTH)
+ ? STRING_ADD_CHECK_LEFT : STRING_ADD_CHECK_NONE;
+ }
+ instr = NewUncasted<HStringAdd>(left, right, flags);
+ } else {
+ instr = NewUncasted<HAdd>(left, right);
}
- if (right_type->Is(Type::String())) {
- BuildCheckHeapObject(right);
- AddInstruction(HCheckInstanceType::NewIsString(right, zone()));
- flags = (flags == STRING_ADD_CHECK_BOTH)
- ? STRING_ADD_CHECK_LEFT : STRING_ADD_CHECK_NONE;
- }
- instr = NewUncasted<HStringAdd>(left, right, flags);
- } else {
- instr = NewUncasted<HAdd>(left, right);
- }
- break;
- case Token::SUB:
- instr = NewUncasted<HSub>(left, right);
- break;
- case Token::MUL:
- instr = NewUncasted<HMul>(left, right);
- break;
- case Token::MOD:
- instr = NewUncasted<HMod>(left, right, fixed_right_arg);
- break;
- case Token::DIV:
- instr = NewUncasted<HDiv>(left, right);
- break;
- case Token::BIT_XOR:
- case Token::BIT_AND:
- instr = NewUncasted<HBitwise>(op, left, right);
- break;
- case Token::BIT_OR: {
- HValue* operand, *shift_amount;
- if (left_type->Is(Type::Signed32()) &&
- right_type->Is(Type::Signed32()) &&
- MatchRotateRight(left, right, &operand, &shift_amount)) {
- instr = NewUncasted<HRor>(operand, shift_amount);
- } else {
+ break;
+ case Token::SUB:
+ instr = NewUncasted<HSub>(left, right);
+ break;
+ case Token::MUL:
+ instr = NewUncasted<HMul>(left, right);
+ break;
+ case Token::MOD:
+ instr = NewUncasted<HMod>(left, right, fixed_right_arg);
+ break;
+ case Token::DIV:
+ instr = NewUncasted<HDiv>(left, right);
+ break;
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
instr = NewUncasted<HBitwise>(op, left, right);
+ break;
+ case Token::BIT_OR: {
+ HValue* operand, *shift_amount;
+ if (left_type->Is(Type::Signed32()) &&
+ right_type->Is(Type::Signed32()) &&
+ MatchRotateRight(left, right, &operand, &shift_amount)) {
+ instr = NewUncasted<HRor>(operand, shift_amount);
+ } else {
+ instr = NewUncasted<HBitwise>(op, left, right);
+ }
+ break;
}
- break;
+ case Token::SAR:
+ instr = NewUncasted<HSar>(left, right);
+ break;
+ case Token::SHR:
+ instr = NewUncasted<HShr>(left, right);
+ if (FLAG_opt_safe_uint32_operations && instr->IsShr() &&
+ CanBeZero(right)) {
+ graph()->RecordUint32Instruction(instr);
+ }
+ break;
+ case Token::SHL:
+ instr = NewUncasted<HShl>(left, right);
+ break;
+ default:
+ UNREACHABLE();
}
- case Token::SAR:
- instr = NewUncasted<HSar>(left, right);
- break;
- case Token::SHR:
- instr = NewUncasted<HShr>(left, right);
- if (FLAG_opt_safe_uint32_operations && instr->IsShr() &&
- CanBeZero(right)) {
- graph()->RecordUint32Instruction(instr);
- }
- break;
- case Token::SHL:
- instr = NewUncasted<HShl>(left, right);
- break;
- default:
- UNREACHABLE();
}
if (instr->IsBinaryOperation()) {
@@ -7993,6 +7976,19 @@
binop->set_observed_input_representation(1, left_rep);
binop->set_observed_input_representation(2, right_rep);
binop->initialize_output_representation(result_rep);
+ if (binop_stub) {
+ // The stub code must not call back into the stub.
+ instr->SetFlag(HValue::kCannotBeTagged);
+ // Truncation should already happen on the HForceRepresentation inputs.
+ if (left->IsForceRepresentation()) {
+ left->CopyFlag(HValue::kTruncatingToSmi, instr);
+ left->CopyFlag(HValue::kTruncatingToInt32, instr);
+ }
+ if (right->IsForceRepresentation()) {
+ right->CopyFlag(HValue::kTruncatingToSmi, instr);
+ right->CopyFlag(HValue::kTruncatingToInt32, instr);
+ }
+ }
}
return instr;
}
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 31a9c45..271835b 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -1287,7 +1287,8 @@
Handle<Type> left_type,
Handle<Type> right_type,
Handle<Type> result_type,
- Maybe<int> fixed_right_arg);
+ Maybe<int> fixed_right_arg,
+ bool binop_stub = false);
HLoadNamedField* AddLoadFixedArrayLength(HValue *object);
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 83613cc..56798d3 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -293,6 +293,18 @@
}
+void BinaryOpStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, eax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+}
+
+
#define __ ACCESS_MASM(masm)
@@ -480,18 +492,6 @@
// on FPU stack.
static void LoadFloatOperand(MacroAssembler* masm, Register number);
- // Code pattern for loading floating point values. Input values must
- // be either smi or heap number objects (fp values). Requirements:
- // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
- // Returns operands as floating point numbers on FPU stack.
- static void LoadFloatOperands(MacroAssembler* masm,
- Register scratch,
- ArgLocation arg_location = ARGS_ON_STACK);
-
- // Similar to LoadFloatOperand but assumes that both operands are smis.
- // Expects operands in edx, eax.
- static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
-
// Test if operands are smi or number objects (fp). Requirements:
// operand_1 in eax, operand_2 in edx; falls through on float
// operands, jumps to the non_float label otherwise.
@@ -499,32 +499,11 @@
Label* non_float,
Register scratch);
- // Takes the operands in edx and eax and loads them as integers in eax
- // and ecx.
- static void LoadUnknownsAsIntegers(MacroAssembler* masm,
- bool use_sse3,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- Label* operand_conversion_failure);
-
// Test if operands are numbers (smi or HeapNumber objects), and load
// them into xmm0 and xmm1 if they are. Jump to label not_numbers if
// either operand is not a number. Operands are in edx and eax.
// Leaves operands unchanged.
static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
-
- // Similar to LoadSSE2Operands but assumes that both operands are smis.
- // Expects operands in edx, eax.
- static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
-
- // Checks that |operand| has an int32 value. If |int32_result| is different
- // from |scratch|, it will contain that int32 value.
- static void CheckSSE2OperandIsInt32(MacroAssembler* masm,
- Label* non_int32,
- XMMRegister operand,
- Register int32_result,
- Register scratch,
- XMMRegister xmm_scratch);
};
@@ -668,1259 +647,6 @@
}
-void BinaryOpStub::Initialize() {
- platform_specific_bit_ = CpuFeatures::IsSupported(SSE3);
-}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(ecx); // Save return address.
- __ push(edx);
- __ push(eax);
- // Left and right arguments are now on top.
- __ push(Immediate(Smi::FromInt(MinorKey())));
-
- __ push(ecx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-// Prepare for a type transition runtime call when the args are already on
-// the stack, under the return address.
-void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
- __ pop(ecx); // Save return address.
- // Left and right arguments are already on top of the stack.
- __ push(Immediate(Smi::FromInt(MinorKey())));
-
- __ push(ecx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-static void BinaryOpStub_GenerateRegisterArgsPop(MacroAssembler* masm) {
- __ pop(ecx);
- __ pop(eax);
- __ pop(edx);
- __ push(ecx);
-}
-
-
-static void BinaryOpStub_GenerateSmiCode(
- MacroAssembler* masm,
- Label* slow,
- BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
- Token::Value op) {
- // 1. Move arguments into edx, eax except for DIV and MOD, which need the
- // dividend in eax and edx free for the division. Use eax, ebx for those.
- Comment load_comment(masm, "-- Load arguments");
- Register left = edx;
- Register right = eax;
- if (op == Token::DIV || op == Token::MOD) {
- left = eax;
- right = ebx;
- __ mov(ebx, eax);
- __ mov(eax, edx);
- }
-
-
- // 2. Prepare the smi check of both operands by oring them together.
- Comment smi_check_comment(masm, "-- Smi check arguments");
- Label not_smis;
- Register combined = ecx;
- ASSERT(!left.is(combined) && !right.is(combined));
- switch (op) {
- case Token::BIT_OR:
- // Perform the operation into eax and smi check the result. Preserve
- // eax in case the result is not a smi.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, left); // Bitwise or is commutative.
- combined = right;
- break;
-
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- __ mov(combined, right);
- __ or_(combined, left);
- break;
-
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Move the right operand into ecx for the shift operation, use eax
- // for the smi check register.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, left);
- combined = right;
- break;
-
- default:
- break;
- }
-
- // 3. Perform the smi check of the operands.
- STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
- __ JumpIfNotSmi(combined, &not_smis);
-
- // 4. Operands are both smis, perform the operation leaving the result in
- // eax and check the result if necessary.
- Comment perform_smi(masm, "-- Perform smi operation");
- Label use_fp_on_smis;
- switch (op) {
- case Token::BIT_OR:
- // Nothing to do.
- break;
-
- case Token::BIT_XOR:
- ASSERT(right.is(eax));
- __ xor_(right, left); // Bitwise xor is commutative.
- break;
-
- case Token::BIT_AND:
- ASSERT(right.is(eax));
- __ and_(right, left); // Bitwise and is commutative.
- break;
-
- case Token::SHL:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shl_cl(left);
- // Check that the *signed* result fits in a smi.
- __ cmp(left, 0xc0000000);
- __ j(sign, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SAR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ sar_cl(left);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SHR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shr_cl(left);
- // Check that the *unsigned* result fits in a smi.
- // Neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging.
- // - 0x40000000: this number would convert to negative when
- // Smi tagging. These two cases can only happen with shifts
- // by 0 or 1 when handed a valid smi.
- __ test(left, Immediate(0xc0000000));
- __ j(not_zero, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::ADD:
- ASSERT(right.is(eax));
- __ add(right, left); // Addition is commutative.
- __ j(overflow, &use_fp_on_smis);
- break;
-
- case Token::SUB:
- __ sub(left, right);
- __ j(overflow, &use_fp_on_smis);
- __ mov(eax, left);
- break;
-
- case Token::MUL:
- // If the smi tag is 0 we can just leave the tag on one operand.
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
- // We can't revert the multiplication if the result is not a smi
- // so save the right operand.
- __ mov(ebx, right);
- // Remove tag from one of the operands (but keep sign).
- __ SmiUntag(right);
- // Do multiplication.
- __ imul(right, left); // Multiplication is commutative.
- __ j(overflow, &use_fp_on_smis);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(right, combined, &use_fp_on_smis);
- break;
-
- case Token::DIV:
- // We can't revert the division if the result is not a smi so
- // save the left operand.
- __ mov(edi, left);
- // Check for 0 divisor.
- __ test(right, right);
- __ j(zero, &use_fp_on_smis);
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for the corner case of dividing the most negative smi by
- // -1. We cannot use the overflow flag, since it is not set by idiv
- // instruction.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ cmp(eax, 0x40000000);
- __ j(equal, &use_fp_on_smis);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
- // Check that the remainder is zero.
- __ test(edx, edx);
- __ j(not_zero, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(eax);
- break;
-
- case Token::MOD:
- // Check for 0 divisor.
- __ test(right, right);
- __ j(zero, &not_smis);
-
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(edx, combined, slow);
- // Move remainder to register eax.
- __ mov(eax, edx);
- break;
-
- default:
- UNREACHABLE();
- }
-
- // 5. Emit return of result in eax. Some operations have registers pushed.
- switch (op) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- __ ret(0);
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- __ ret(2 * kPointerSize);
- break;
- default:
- UNREACHABLE();
- }
-
- // 6. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- if (allow_heapnumber_results == BinaryOpStub::NO_HEAPNUMBER_RESULTS) {
- __ bind(&use_fp_on_smis);
- switch (op) {
- // Undo the effects of some operations, and some register moves.
- case Token::SHL:
- // The arguments are saved on the stack, and only used from there.
- break;
- case Token::ADD:
- // Revert right = right + left.
- __ sub(right, left);
- break;
- case Token::SUB:
- // Revert left = left - right.
- __ add(left, right);
- break;
- case Token::MUL:
- // Right was clobbered but a copy is in ebx.
- __ mov(right, ebx);
- break;
- case Token::DIV:
- // Left was clobbered but a copy is in edi. Right is in ebx for
- // division. They should be in eax, ebx for jump to not_smi.
- __ mov(eax, edi);
- break;
- default:
- // No other operators jump to use_fp_on_smis.
- break;
- }
- __ jmp(&not_smis);
- } else {
- ASSERT(allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS);
- switch (op) {
- case Token::SHL:
- case Token::SHR: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Result we want is in left == edx, so we can put the allocated heap
- // number in eax.
- __ AllocateHeapNumber(eax, ecx, ebx, slow);
- // Store the result in the HeapNumber and return.
- // It's OK to overwrite the arguments on the stack because we
- // are about to return.
- if (op == Token::SHR) {
- __ mov(Operand(esp, 1 * kPointerSize), left);
- __ mov(Operand(esp, 2 * kPointerSize), Immediate(0));
- __ fild_d(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- } else {
- ASSERT_EQ(Token::SHL, op);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ Cvtsi2sd(xmm0, left);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), left);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- }
- __ ret(2 * kPointerSize);
- break;
- }
-
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Restore arguments to edx, eax.
- switch (op) {
- case Token::ADD:
- // Revert right = right + left.
- __ sub(right, left);
- break;
- case Token::SUB:
- // Revert left = left - right.
- __ add(left, right);
- break;
- case Token::MUL:
- // Right was clobbered but a copy is in ebx.
- __ mov(right, ebx);
- break;
- case Token::DIV:
- // Left was clobbered but a copy is in edi. Right is in ebx for
- // division.
- __ mov(edx, edi);
- __ mov(eax, right);
- break;
- default: UNREACHABLE();
- break;
- }
- __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- FloatingPointHelper::LoadSSE2Smis(masm, ebx);
- switch (op) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::LoadFloatSmis(masm, ebx);
- switch (op) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
- }
- __ mov(eax, ecx);
- __ ret(0);
- break;
- }
-
- default:
- break;
- }
- }
-
- // 7. Non-smi operands, fall out to the non-smi code with the operands in
- // edx and eax.
- Comment done_comment(masm, "-- Enter non-smi code");
- __ bind(&not_smis);
- switch (op) {
- case Token::BIT_OR:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Right operand is saved in ecx and eax was destroyed by the smi
- // check.
- __ mov(eax, ecx);
- break;
-
- case Token::DIV:
- case Token::MOD:
- // Operands are in eax, ebx at this point.
- __ mov(edx, eax);
- __ mov(eax, ebx);
- break;
-
- default:
- break;
- }
-}
-
-
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label right_arg_changed, call_runtime;
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateRegisterArgsPush(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- if (op_ == Token::MOD && encoded_right_arg_.has_value) {
- // It is guaranteed that the value will fit into a Smi, because if it
- // didn't, we wouldn't be here, see BinaryOp_Patch.
- __ cmp(eax, Immediate(Smi::FromInt(fixed_right_arg_value())));
- __ j(not_equal, &right_arg_changed);
- }
-
- if (result_type_ == BinaryOpIC::UNINITIALIZED ||
- result_type_ == BinaryOpIC::SMI) {
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, NO_HEAPNUMBER_RESULTS, op_);
- } else {
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
- }
-
- // Code falls through if the result is not returned as either a smi or heap
- // number.
- __ bind(&right_arg_changed);
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- GenerateTypeTransition(masm);
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateTypeTransitionWithSavedArgs(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- __ bind(&call_runtime);
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- BinaryOpStub_GenerateRegisterArgsPop(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
- __ push(eax);
- GenerateCallRuntime(masm);
- }
- __ ret(0);
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // If both arguments are strings, call the string add stub.
- // Otherwise, do a transition.
-
- // Registers containing left and right operands respectively.
- Register left = edx;
- Register right = eax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &call_runtime, Label::kNear);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime, Label::kNear);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- StringAddStub string_add_stub(
- (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&call_runtime);
- GenerateTypeTransition(masm);
-}
-
-
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure,
- OverwriteMode mode);
-
-
-// Input:
-// edx: left operand (tagged)
-// eax: right operand (tagged)
-// Output:
-// eax: result (tagged)
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
-
- // Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- Label not_floats, not_int32, right_arg_changed;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- // In theory, we would need the same check in the non-SSE2 case,
- // but since we don't support Crankshaft on such hardware we can
- // afford not to care about precise type feedback.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(edx, &not_int32);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(eax, &not_int32);
- }
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_int32, xmm0, ebx, ecx, xmm2);
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_int32, xmm1, edi, ecx, xmm2);
- if (op_ == Token::MOD) {
- if (encoded_right_arg_.has_value) {
- __ cmp(edi, Immediate(fixed_right_arg_value()));
- __ j(not_equal, &right_arg_changed);
- }
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- } else {
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- // Check result type if it is currently Int32.
- if (result_type_ <= BinaryOpIC::INT32) {
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_int32, xmm0, ecx, ecx, xmm2);
- }
- BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- }
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- if (op_ == Token::MOD) {
- // The operands are now on the FPU stack, but we don't need them.
- __ fstp(0);
- __ fstp(0);
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- } else {
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, &after_alloc_failure, mode_);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- __ bind(&after_alloc_failure);
- __ fstp(0); // Pop FPU stack before calling runtime.
- __ jmp(&call_runtime);
- }
- }
-
- __ bind(&not_floats);
- __ bind(&not_int32);
- __ bind(&right_arg_changed);
- GenerateTypeTransition(masm);
- break;
- }
-
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- GenerateRegisterArgsPush(masm);
- Label not_floats;
- Label not_int32;
- Label non_smi_result;
- bool use_sse3 = platform_specific_bit_;
- FloatingPointHelper::LoadUnknownsAsIntegers(
- masm, use_sse3, left_type_, right_type_, &not_floats);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, ecx); break;
- case Token::BIT_AND: __ and_(eax, ecx); break;
- case Token::BIT_XOR: __ xor_(eax, ecx); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result, Label::kNear);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, eax); // ebx: result
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ Cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
- }
-
- __ bind(&not_floats);
- __ bind(&not_int32);
- GenerateTypeTransitionWithSavedArgs(masm);
- break;
- }
- default: UNREACHABLE(); break;
- }
-
- // If an allocation fails, or SHR hits a hard case, use the runtime system to
- // get the correct result.
- __ bind(&call_runtime);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- return; // Handled above.
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- BinaryOpStub_GenerateRegisterArgsPop(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
- __ push(eax);
- GenerateCallRuntime(masm);
- }
- __ ret(0);
-}
-
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateAddStrings(masm);
- }
-
- Factory* factory = masm->isolate()->factory();
-
- // Convert odd ball arguments to numbers.
- Label check, done;
- __ cmp(edx, factory->undefined_value());
- __ j(not_equal, &check, Label::kNear);
- if (Token::IsBitOp(op_)) {
- __ xor_(edx, edx);
- } else {
- __ mov(edx, Immediate(factory->nan_value()));
- }
- __ jmp(&done, Label::kNear);
- __ bind(&check);
- __ cmp(eax, factory->undefined_value());
- __ j(not_equal, &done, Label::kNear);
- if (Token::IsBitOp(op_)) {
- __ xor_(eax, eax);
- } else {
- __ mov(eax, Immediate(factory->nan_value()));
- }
- __ bind(&done);
-
- GenerateNumberStub(masm);
-}
-
-
-void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- Label call_runtime;
-
- // Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Label not_floats;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
-
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- // In theory, we would need the same check in the non-SSE2 case,
- // but since we don't support Crankshaft on such hardware we can
- // afford not to care about precise type feedback.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(edx, &not_floats);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(eax, &not_floats);
- }
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
- if (left_type_ == BinaryOpIC::INT32) {
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_floats, xmm0, ecx, ecx, xmm2);
- }
- if (right_type_ == BinaryOpIC::INT32) {
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_floats, xmm1, ecx, ecx, xmm2);
- }
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, &after_alloc_failure, mode_);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- __ bind(&after_alloc_failure);
- __ fstp(0); // Pop FPU stack before calling runtime.
- __ jmp(&call_runtime);
- }
-
- __ bind(&not_floats);
- GenerateTypeTransition(masm);
- break;
- }
-
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- GenerateRegisterArgsPush(masm);
- Label not_floats;
- Label non_smi_result;
- // We do not check the input arguments here, as any value is
- // unconditionally truncated to an int32 anyway. To get the
- // right optimized code, int32 type feedback is just right.
- bool use_sse3 = platform_specific_bit_;
- FloatingPointHelper::LoadUnknownsAsIntegers(
- masm, use_sse3, left_type_, right_type_, &not_floats);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, ecx); break;
- case Token::BIT_AND: __ and_(eax, ecx); break;
- case Token::BIT_XOR: __ xor_(eax, ecx); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result, Label::kNear);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, eax); // ebx: result
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ Cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
- }
-
- __ bind(&not_floats);
- GenerateTypeTransitionWithSavedArgs(masm);
- break;
- }
- default: UNREACHABLE(); break;
- }
-
- // If an allocation fails, or SHR or MOD hit a hard case,
- // use the runtime system to get the correct result.
- __ bind(&call_runtime);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- break;
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- BinaryOpStub_GenerateRegisterArgsPop(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
- __ push(eax);
- GenerateCallRuntime(masm);
- }
- __ ret(0);
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime;
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->generic_binary_stub_calls(), 1);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateRegisterArgsPush(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
-
- // Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Label not_floats;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, &after_alloc_failure, mode_);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- __ bind(&after_alloc_failure);
- __ fstp(0); // Pop FPU stack before calling runtime.
- __ jmp(&call_runtime);
- }
- __ bind(&not_floats);
- break;
- }
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label non_smi_result;
- bool use_sse3 = platform_specific_bit_;
- FloatingPointHelper::LoadUnknownsAsIntegers(masm,
- use_sse3,
- BinaryOpIC::GENERIC,
- BinaryOpIC::GENERIC,
- &call_runtime);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, ecx); break;
- case Token::BIT_AND: __ and_(eax, ecx); break;
- case Token::BIT_XOR: __ xor_(eax, ecx); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result, Label::kNear);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- __ ret(2 * kPointerSize); // Drop the arguments from the stack.
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, eax); // ebx: result
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ Cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(2 * kPointerSize);
- }
- break;
- }
- default: UNREACHABLE(); break;
- }
-
- // If all else fails, use the runtime system to get the correct
- // result.
- __ bind(&call_runtime);
- switch (op_) {
- case Token::ADD:
- GenerateAddStrings(masm);
- // Fall through.
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- BinaryOpStub_GenerateRegisterArgsPop(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
- __ push(eax);
- GenerateCallRuntime(masm);
- }
- __ ret(0);
-}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- Label left_not_string, call_runtime;
-
- // Registers containing left and right operands respectively.
- Register left = edx;
- Register right = eax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &left_not_string, Label::kNear);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &left_not_string, Label::kNear);
-
- StringAddStub string_add_left_stub(
- (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime, Label::kNear);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- StringAddStub string_add_right_stub(
- (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // Neither argument is a string.
- __ bind(&call_runtime);
-}
-
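The CmpObjectType tests above rely on V8's instance-type numbering, where every string representation sorts below FIRST_NONSTRING_TYPE, so a single unsigned compare classifies the operand; "j(above_equal, ...)" takes the non-string path. A sketch of the predicate (the threshold is passed in to keep the example self-contained):

    // A map's instance type denotes a string iff it is below the first
    // non-string type.
    static bool IsStringInstanceType(unsigned instance_type,
                                     unsigned first_nonstring_type) {
      return instance_type < first_nonstring_type;
    }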
-
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure,
- OverwriteMode mode) {
- Label skip_allocation;
- switch (mode) {
- case OVERWRITE_LEFT: {
- // If the argument in edx is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(edx, &skip_allocation, Label::kNear);
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now edx can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(edx, ebx);
- __ bind(&skip_allocation);
- // Use object in edx as a result holder
- __ mov(eax, edx);
- break;
- }
- case OVERWRITE_RIGHT:
- // If the argument in eax is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now eax can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(eax, ebx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
-}
-
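The overwrite modes above decide whether an operand's HeapNumber may be reused as the result box: only the designated operand, and only when it is not a smi, escapes a fresh allocation. A rough model (illustrative, not the V8 declarations):

    enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

    // True when AllocateHeapNumber can be skipped because the overwritable
    // operand is already a heap object the stub may clobber.
    static bool CanReuseOperandAsResult(OverwriteMode mode,
                                        bool operand_is_smi) {
      return mode != NO_OVERWRITE && !operand_is_smi;
    }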
-
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ pop(ecx);
- __ push(edx);
- __ push(eax);
- __ push(ecx);
-}
-
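GenerateRegisterArgsPush slips the two operands underneath the return address, so a stub entered via TailCallStub still finds them as stack arguments. Modeled on a vector whose back() is the stack top (illustrative only):

    #include <vector>

    // pop(ecx); push(edx); push(eax); push(ecx): the return address ends up
    // back on top, with the operands inserted just below it.
    static void RegisterArgsPush(std::vector<int>* stack, int edx, int eax) {
      int ret = stack->back();  // pop ecx: the caller's return address
      stack->pop_back();
      stack->push_back(edx);    // left operand
      stack->push_back(eax);    // right operand
      stack->push_back(ret);    // return address restored on top
    }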
-
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// TAGGED case:
// Input:
@@ -2231,79 +957,6 @@
}
-// Input: edx, eax are the left and right objects of a bit op.
-// Output: eax, ecx are left and right integers for a bit op.
-// Warning: can clobber inputs even when it jumps to |conversion_failure|!
-void FloatingPointHelper::LoadUnknownsAsIntegers(
- MacroAssembler* masm,
- bool use_sse3,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- Label* conversion_failure) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- // Test if arg1 is a Smi.
- if (left_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(edx, conversion_failure);
- } else {
- __ JumpIfNotSmi(edx, &arg1_is_object, Label::kNear);
- }
-
- __ SmiUntag(edx);
- __ jmp(&load_arg2);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg1);
- Factory* factory = masm->isolate()->factory();
- __ cmp(edx, factory->undefined_value());
- __ j(not_equal, conversion_failure);
- __ mov(edx, Immediate(0));
- __ jmp(&load_arg2);
-
- __ bind(&arg1_is_object);
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(ebx, factory->heap_number_map());
- __ j(not_equal, &check_undefined_arg1);
-
- __ TruncateHeapNumberToI(edx, edx);
-
- // Here edx has the untagged integer, eax has a Smi or a heap number.
- __ bind(&load_arg2);
-
- // Test if arg2 is a Smi.
- if (right_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(eax, conversion_failure);
- } else {
- __ JumpIfNotSmi(eax, &arg2_is_object, Label::kNear);
- }
-
- __ SmiUntag(eax);
- __ mov(ecx, eax);
- __ jmp(&done);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg2);
- __ cmp(eax, factory->undefined_value());
- __ j(not_equal, conversion_failure);
- __ mov(ecx, Immediate(0));
- __ jmp(&done);
-
- __ bind(&arg2_is_object);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(ebx, factory->heap_number_map());
- __ j(not_equal, &check_undefined_arg2);
- // Get the untagged integer version of the eax heap number in ecx.
-
- __ TruncateHeapNumberToI(ecx, eax);
-
- __ bind(&done);
- __ mov(eax, edx);
-}
-
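The conversion policy above, restated over a hypothetical tagged-value model (the struct and names are illustrative, not V8 API):

    #include <cstdint>

    struct TaggedValue {
      enum Kind { SMI, HEAP_NUMBER, UNDEFINED, OTHER } kind;
      int32_t smi;    // untagged payload when kind == SMI
      double number;  // payload when kind == HEAP_NUMBER
    };

    // Smis untag, heap numbers truncate (TruncateHeapNumberToI implements
    // ECMA ToInt32), undefined becomes 0, anything else bails out.
    static bool LoadUnknownAsInteger(const TaggedValue& v, int32_t* out) {
      switch (v.kind) {
        case TaggedValue::SMI:
          *out = v.smi;
          return true;
        case TaggedValue::HEAP_NUMBER:
          // Matches ECMA ToInt32 for finite |number| < 2^63: truncate toward
          // zero, then wrap modulo 2^32.
          *out = static_cast<int32_t>(
              static_cast<uint32_t>(static_cast<int64_t>(v.number)));
          return true;
        case TaggedValue::UNDEFINED:
          *out = 0;  // ECMA-262 section 9.5
          return true;
        default:
          return false;  // -> conversion_failure
      }
    }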
-
void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
Register number) {
Label load_smi, done;
@@ -2353,95 +1006,6 @@
}
-void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
- Register scratch) {
- const Register left = edx;
- const Register right = eax;
- __ mov(scratch, left);
- ASSERT(!scratch.is(right)); // We're about to clobber scratch.
- __ SmiUntag(scratch);
- __ Cvtsi2sd(xmm0, scratch);
-
- __ mov(scratch, right);
- __ SmiUntag(scratch);
- __ Cvtsi2sd(xmm1, scratch);
-}
-
-
-void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm,
- Label* non_int32,
- XMMRegister operand,
- Register int32_result,
- Register scratch,
- XMMRegister xmm_scratch) {
- __ cvttsd2si(int32_result, Operand(operand));
- __ Cvtsi2sd(xmm_scratch, int32_result);
- __ pcmpeqd(xmm_scratch, operand);
- __ movmskps(scratch, xmm_scratch);
- // Two least significant bits should be both set.
- __ not_(scratch);
- __ test(scratch, Immediate(3));
- __ j(not_zero, non_int32);
-}
-
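The cvttsd2si/Cvtsi2sd round trip above, in scalar C++ (assuming x86 cvttsd2si semantics, where out-of-range inputs and NaN produce 0x80000000). Note the stub compares bit patterns via pcmpeqd/movmskps, so -0.0 is rejected even though -0.0 == 0.0 compares equal numerically:

    #include <cstdint>
    #include <cstring>

    static bool IsInt32Double(double d) {
      int32_t i = static_cast<int32_t>(d);   // cvttsd2si: truncate to int32
      double back = static_cast<double>(i);  // Cvtsi2sd
      uint64_t a, b;
      std::memcpy(&a, &d, sizeof(a));
      std::memcpy(&b, &back, sizeof(b));
      return a == b;  // bitwise equality, as pcmpeqd sees it
    }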
-
-void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
- Register scratch,
- ArgLocation arg_location) {
- Label load_smi_1, load_smi_2, done_load_1, done;
- if (arg_location == ARGS_IN_REGISTERS) {
- __ mov(scratch, edx);
- } else {
- __ mov(scratch, Operand(esp, 2 * kPointerSize));
- }
- __ JumpIfSmi(scratch, &load_smi_1, Label::kNear);
- __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
- __ bind(&done_load_1);
-
- if (arg_location == ARGS_IN_REGISTERS) {
- __ mov(scratch, eax);
- } else {
- __ mov(scratch, Operand(esp, 1 * kPointerSize));
- }
- __ JumpIfSmi(scratch, &load_smi_2, Label::kNear);
- __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
- __ jmp(&done, Label::kNear);
-
- __ bind(&load_smi_1);
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
- __ jmp(&done_load_1);
-
- __ bind(&load_smi_2);
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
- Register scratch) {
- const Register left = edx;
- const Register right = eax;
- __ mov(scratch, left);
- ASSERT(!scratch.is(right)); // We're about to clobber scratch.
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
-
- __ mov(scratch, right);
- __ SmiUntag(scratch);
- __ mov(Operand(esp, 0), scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
-}
-
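Without SSE2 an untagged smi has to take a detour through memory, because the x87 FPU only has a memory-operand integer load (fild). The push/fild_s/pop sequence above simply computes the following, exactly (a smi's magnitude is below 2^30, well inside double's 53-bit mantissa):

    static double SmiToDouble(int32_t untagged_smi) {
      return static_cast<double>(untagged_smi);
    }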
-
void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
Label* non_float,
Register scratch) {
@@ -4354,6 +2918,8 @@
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ PlatformFeatureScope sse2(SSE2);
+ BinaryOpStub::GenerateAheadOfTime(isolate);
}
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index 649bf9c..bb32bc5 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -212,6 +212,7 @@
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+ if (!CpuFeatures::IsSupported(SSE2)) return;
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 3662d21..d73fdfb 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -1127,26 +1127,31 @@
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
ZoneList<Handle<Map> > maps(1, zone());
+ ZoneList<Handle<JSObject> > objects(1, zone());
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT &&
- it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- if (map->CanTransition()) {
+ if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
+ if (it.rinfo()->target_object()->IsMap()) {
+ Handle<Map> map(Map::cast(it.rinfo()->target_object()));
maps.Add(map, zone());
+ } else if (it.rinfo()->target_object()->IsJSObject()) {
+ Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
+ objects.Add(object, zone());
}
}
}
#ifdef VERIFY_HEAP
- // This disables verification of weak embedded maps after full GC.
+ // This disables verification of weak embedded objects after full GC.
// AddDependentCode can cause a GC, which would observe the state where
// this code is not yet in the depended code lists of the embedded maps.
- NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
+ NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
#endif
for (int i = 0; i < maps.length(); i++) {
maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
}
+ for (int i = 0; i < objects.length(); i++) {
+ AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
+ }
}
@@ -2271,6 +2276,8 @@
__ PrepareCallCFunction(4, eax);
X87Mov(Operand(esp, 1 * kDoubleSize), right);
X87Mov(Operand(esp, 0), left);
+ X87Free(right);
+ ASSERT(left.is(result));
X87PrepareToWrite(result);
__ CallCFunction(
ExternalReference::double_fp_operation(Token::MOD, isolate()),
@@ -4435,12 +4442,9 @@
ToExternalReference(LConstantOperand::cast(instr->object())))
: MemOperand(ToRegister(instr->object()), offset);
if (instr->value()->IsConstantOperand()) {
+ ASSERT(!representation.IsByte());
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (representation.IsByte()) {
- __ mov_b(operand, ToInteger32(operand_value));
- } else {
- __ mov(operand, Immediate(ToInteger32(operand_value)));
- }
+ __ mov(operand, Immediate(ToInteger32(operand_value)));
} else {
Register value = ToRegister(instr->value());
if (representation.IsByte()) {
@@ -5014,14 +5018,21 @@
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
- CpuFeatureScope scope(masm(), SSE2);
LOperand* input = instr->value();
LOperand* output = instr->result();
- LOperand* temp = instr->temp();
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ LOperand* temp = instr->temp();
- __ LoadUint32(ToDoubleRegister(output),
- ToRegister(input),
- ToDoubleRegister(temp));
+ __ LoadUint32(ToDoubleRegister(output),
+ ToRegister(input),
+ ToDoubleRegister(temp));
+ } else {
+ X87Register res = ToX87Register(output);
+ X87PrepareToWrite(res);
+ __ LoadUint32NoSSE2(ToRegister(input));
+ X87CommitWrite(res);
+ }
}
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 357dcaf..54ea04e 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -121,6 +121,10 @@
void X87Fxch(X87Register reg, int other_slot = 0) {
x87_stack_.Fxch(reg, other_slot);
}
+ void X87Free(X87Register reg) {
+ x87_stack_.Free(reg);
+ }
+
bool X87StackEmpty() {
return x87_stack_.depth() == 0;
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 83b4138..a4d2e18 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -2422,7 +2422,11 @@
!(FLAG_track_double_fields && instr->field_representation().IsDouble());
LOperand* val;
- if (needs_write_barrier) {
+ if (instr->field_representation().IsByte()) {
+ // mov_b requires a byte register (i.e. any of eax, ebx, ecx, edx).
+ // Just force the value to be in eax and we're safe here.
+ val = UseFixed(instr->value(), eax);
+ } else if (needs_write_barrier) {
val = UseTempRegister(instr->value());
} else if (can_be_constant) {
val = UseRegisterOrConstant(instr->value());
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 64b5683..16b8bd7 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -253,8 +253,8 @@
Label::Distance dst) {
Label done;
sub(esp, Immediate(kPointerSize));
- fist_s(MemOperand(esp, 0));
fld(0);
+ fist_s(MemOperand(esp, 0));
fild_s(MemOperand(esp, 0));
pop(result_reg);
FCmp();
@@ -453,6 +453,7 @@
void MacroAssembler::LoadUint32(XMMRegister dst,
Register src,
XMMRegister scratch) {
+ ASSERT(!Serializer::enabled());
Label done;
cmp(src, Immediate(0));
movdbl(scratch,
@@ -464,6 +465,20 @@
}
+void MacroAssembler::LoadUint32NoSSE2(Register src) {
+ ASSERT(!Serializer::enabled());
+ Label done;
+ push(src);
+ fild_s(Operand(esp, 0));
+ cmp(src, Immediate(0));
+ j(not_sign, &done, Label::kNear);
+ fld_d(Operand(reinterpret_cast<int32_t>(&kUint32Bias), RelocInfo::NONE32));
+ faddp(1);
+ bind(&done);
+ add(esp, Immediate(kPointerSize));
+}
+
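Both LoadUint32 variants fix up the same signedness problem: fild_s and the SSE2 path interpret the register as a signed int32, so an input with the top bit set comes out 2^32 too small and kUint32Bias (2^32 as a double) is added back. Scalar equivalent:

    #include <cstdint>

    static double Uint32ToDouble(uint32_t u) {
      // Signed view first, as the hardware conversion sees it...
      double d = static_cast<double>(static_cast<int32_t>(u));
      // ...then compensate when the sign bit made the value negative.
      if (static_cast<int32_t>(u) < 0) d += 4294967296.0;  // + kUint32Bias
      return d;
    }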
+
void MacroAssembler::RecordWriteArray(Register object,
Register value,
Register index,
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index ba09341..33af535 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -515,6 +515,7 @@
}
void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch);
+ void LoadUint32NoSSE2(Register src);
// Jump if the register contains a smi.
inline void JumpIfSmi(Register value,
diff --git a/src/ic.cc b/src/ic.cc
index 9714274..be362d2 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -2329,11 +2329,6 @@
}
-void BinaryOpIC::patch(Code* code) {
- set_target(code);
-}
-
-
const char* BinaryOpIC::GetName(TypeInfo type_info) {
switch (type_info) {
case UNINITIALIZED: return "Uninitialized";
@@ -2348,256 +2343,68 @@
}
-BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) {
- switch (type_info) {
- case UNINITIALIZED:
- return ::v8::internal::UNINITIALIZED;
- case SMI:
- case INT32:
- case NUMBER:
- case ODDBALL:
- case STRING:
- return MONOMORPHIC;
- case GENERIC:
- return ::v8::internal::GENERIC;
- }
- UNREACHABLE();
- return ::v8::internal::UNINITIALIZED;
-}
+MaybeObject* BinaryOpIC::Transition(Handle<Object> left, Handle<Object> right) {
+ Code::ExtraICState extra_ic_state = target()->extended_extra_ic_state();
+ BinaryOpStub stub(extra_ic_state);
+ Handle<Type> left_type = stub.GetLeftType(isolate());
+ Handle<Type> right_type = stub.GetRightType(isolate());
+ bool smi_was_enabled = left_type->Maybe(Type::Smi()) &&
+ right_type->Maybe(Type::Smi());
-Handle<Type> BinaryOpIC::TypeInfoToType(BinaryOpIC::TypeInfo binary_type,
- Isolate* isolate) {
- switch (binary_type) {
- case UNINITIALIZED:
- return handle(Type::None(), isolate);
- case SMI:
- return handle(Type::Smi(), isolate);
- case INT32:
- return handle(Type::Signed32(), isolate);
- case NUMBER:
- return handle(Type::Number(), isolate);
- case ODDBALL:
- return handle(Type::Optional(
- handle(Type::Union(
- handle(Type::Number(), isolate),
- handle(Type::String(), isolate)), isolate)), isolate);
- case STRING:
- return handle(Type::String(), isolate);
- case GENERIC:
- return handle(Type::Any(), isolate);
- }
- UNREACHABLE();
- return handle(Type::Any(), isolate);
-}
-
-
-void BinaryOpIC::StubInfoToType(int minor_key,
- Handle<Type>* left,
- Handle<Type>* right,
- Handle<Type>* result,
- Isolate* isolate) {
- TypeInfo left_typeinfo, right_typeinfo, result_typeinfo;
- BinaryOpStub::decode_types_from_minor_key(
- minor_key, &left_typeinfo, &right_typeinfo, &result_typeinfo);
- *left = TypeInfoToType(left_typeinfo, isolate);
- *right = TypeInfoToType(right_typeinfo, isolate);
- *result = TypeInfoToType(result_typeinfo, isolate);
-}
-
-
-static BinaryOpIC::TypeInfo TypeInfoFromValue(Handle<Object> value,
- Token::Value op) {
- v8::internal::TypeInfo type = v8::internal::TypeInfo::FromValue(value);
- if (type.IsSmi()) return BinaryOpIC::SMI;
- if (type.IsInteger32()) {
- if (SmiValuesAre32Bits()) return BinaryOpIC::SMI;
- return BinaryOpIC::INT32;
- }
- if (type.IsNumber()) return BinaryOpIC::NUMBER;
- if (type.IsString()) return BinaryOpIC::STRING;
- if (value->IsUndefined()) {
- if (op == Token::BIT_AND ||
- op == Token::BIT_OR ||
- op == Token::BIT_XOR ||
- op == Token::SAR ||
- op == Token::SHL ||
- op == Token::SHR) {
- if (SmiValuesAre32Bits()) return BinaryOpIC::SMI;
- return BinaryOpIC::INT32;
- }
- return BinaryOpIC::ODDBALL;
- }
- return BinaryOpIC::GENERIC;
-}
-
-
-static BinaryOpIC::TypeInfo InputState(BinaryOpIC::TypeInfo old_type,
- Handle<Object> value,
- Token::Value op) {
- BinaryOpIC::TypeInfo new_type = TypeInfoFromValue(value, op);
- if (old_type == BinaryOpIC::STRING) {
- if (new_type == BinaryOpIC::STRING) return new_type;
- return BinaryOpIC::GENERIC;
- }
- return Max(old_type, new_type);
-}
-
+ Maybe<Handle<Object> > result = stub.Result(left, right, isolate());
#ifdef DEBUG
-static void TraceBinaryOp(BinaryOpIC::TypeInfo left,
- BinaryOpIC::TypeInfo right,
- Maybe<int32_t> fixed_right_arg,
- BinaryOpIC::TypeInfo result) {
- PrintF("%s*%s", BinaryOpIC::GetName(left), BinaryOpIC::GetName(right));
- if (fixed_right_arg.has_value) PrintF("{%d}", fixed_right_arg.value);
- PrintF("->%s", BinaryOpIC::GetName(result));
-}
+ if (FLAG_trace_ic) {
+ char buffer[100];
+ NoAllocationStringAllocator allocator(buffer,
+ static_cast<unsigned>(sizeof(buffer)));
+ StringStream stream(&allocator);
+ stream.Add("[");
+ stub.PrintName(&stream);
+
+ stub.UpdateStatus(left, right, result);
+
+ stream.Add(" => ");
+ stub.PrintState(&stream);
+ stream.Add(" ");
+ stream.OutputToStdOut();
+ PrintF(" @ %p <- ", static_cast<void*>(*stub.GetCode(isolate())));
+ JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
+ PrintF("]\n");
+ } else {
+ stub.UpdateStatus(left, right, result);
+ }
+#else
+ stub.UpdateStatus(left, right, result);
#endif
+ Handle<Code> code = stub.GetCode(isolate());
+ set_target(*code);
-RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
- ASSERT(args.length() == 3);
+ left_type = stub.GetLeftType(isolate());
+ right_type = stub.GetRightType(isolate());
+ bool enable_smi = left_type->Maybe(Type::Smi()) &&
+ right_type->Maybe(Type::Smi());
+ if (!smi_was_enabled && enable_smi) {
+ PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+ } else if (smi_was_enabled && !enable_smi) {
+ PatchInlinedSmiCode(address(), DISABLE_INLINED_SMI_CHECK);
+ }
+
+ return result.has_value
+ ? static_cast<MaybeObject*>(*result.value)
+ : Failure::Exception();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, BinaryOpIC_Miss) {
HandleScope scope(isolate);
Handle<Object> left = args.at<Object>(0);
Handle<Object> right = args.at<Object>(1);
- int key = args.smi_at(2);
- Token::Value op = BinaryOpStub::decode_op_from_minor_key(key);
-
- BinaryOpIC::TypeInfo previous_left, previous_right, previous_result;
- BinaryOpStub::decode_types_from_minor_key(
- key, &previous_left, &previous_right, &previous_result);
-
- BinaryOpIC::TypeInfo new_left = InputState(previous_left, left, op);
- BinaryOpIC::TypeInfo new_right = InputState(previous_right, right, op);
- BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED;
-
- // STRING is only used for ADD operations.
- if ((new_left == BinaryOpIC::STRING || new_right == BinaryOpIC::STRING) &&
- op != Token::ADD) {
- new_left = new_right = BinaryOpIC::GENERIC;
- }
-
- BinaryOpIC::TypeInfo new_overall = Max(new_left, new_right);
- BinaryOpIC::TypeInfo previous_overall = Max(previous_left, previous_right);
-
- Maybe<int> previous_fixed_right_arg =
- BinaryOpStub::decode_fixed_right_arg_from_minor_key(key);
-
- int32_t value;
- bool new_has_fixed_right_arg =
- op == Token::MOD &&
- right->ToInt32(&value) &&
- BinaryOpStub::can_encode_arg_value(value) &&
- (previous_overall == BinaryOpIC::UNINITIALIZED ||
- (previous_fixed_right_arg.has_value &&
- previous_fixed_right_arg.value == value));
- Maybe<int32_t> new_fixed_right_arg(
- new_has_fixed_right_arg, new_has_fixed_right_arg ? value : 1);
-
- if (previous_fixed_right_arg.has_value == new_fixed_right_arg.has_value) {
- if (new_overall == BinaryOpIC::SMI && previous_overall == BinaryOpIC::SMI) {
- if (op == Token::DIV ||
- op == Token::MUL ||
- op == Token::SHR ||
- SmiValuesAre32Bits()) {
- // Arithmetic on two Smi inputs has yielded a heap number.
- // That is the only way to get here from the Smi stub.
- // With 32-bit Smis, all overflows give heap numbers, but with
- // 31-bit Smis, most operations overflow to int32 results.
- result_type = BinaryOpIC::NUMBER;
- } else {
- // Other operations on SMIs that overflow yield int32s.
- result_type = BinaryOpIC::INT32;
- }
- }
- if (new_overall == BinaryOpIC::INT32 &&
- previous_overall == BinaryOpIC::INT32) {
- if (new_left == previous_left && new_right == previous_right) {
- result_type = BinaryOpIC::NUMBER;
- }
- }
- }
-
- BinaryOpStub stub(key, new_left, new_right, result_type, new_fixed_right_arg);
- Handle<Code> code = stub.GetCode(isolate);
- if (!code.is_null()) {
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[BinaryOpIC in ");
- JavaScriptFrame::PrintTop(isolate, stdout, false, true);
- PrintF(" ");
- TraceBinaryOp(previous_left, previous_right, previous_fixed_right_arg,
- previous_result);
- PrintF(" => ");
- TraceBinaryOp(new_left, new_right, new_fixed_right_arg, result_type);
- PrintF(" #%s @ %p]\n", Token::Name(op), static_cast<void*>(*code));
- }
-#endif
- BinaryOpIC ic(isolate);
- ic.patch(*code);
-
- // Activate inlined smi code.
- if (previous_overall == BinaryOpIC::UNINITIALIZED) {
- PatchInlinedSmiCode(ic.address(), ENABLE_INLINED_SMI_CHECK);
- }
- }
-
- Handle<JSBuiltinsObject> builtins(isolate->js_builtins_object());
- Object* builtin = NULL; // Initialization calms down the compiler.
- switch (op) {
- case Token::ADD:
- builtin = builtins->javascript_builtin(Builtins::ADD);
- break;
- case Token::SUB:
- builtin = builtins->javascript_builtin(Builtins::SUB);
- break;
- case Token::MUL:
- builtin = builtins->javascript_builtin(Builtins::MUL);
- break;
- case Token::DIV:
- builtin = builtins->javascript_builtin(Builtins::DIV);
- break;
- case Token::MOD:
- builtin = builtins->javascript_builtin(Builtins::MOD);
- break;
- case Token::BIT_AND:
- builtin = builtins->javascript_builtin(Builtins::BIT_AND);
- break;
- case Token::BIT_OR:
- builtin = builtins->javascript_builtin(Builtins::BIT_OR);
- break;
- case Token::BIT_XOR:
- builtin = builtins->javascript_builtin(Builtins::BIT_XOR);
- break;
- case Token::SHR:
- builtin = builtins->javascript_builtin(Builtins::SHR);
- break;
- case Token::SAR:
- builtin = builtins->javascript_builtin(Builtins::SAR);
- break;
- case Token::SHL:
- builtin = builtins->javascript_builtin(Builtins::SHL);
- break;
- default:
- UNREACHABLE();
- }
-
- Handle<JSFunction> builtin_function(JSFunction::cast(builtin), isolate);
-
- bool caught_exception;
- Handle<Object> builtin_args[] = { right };
- Handle<Object> result = Execution::Call(isolate,
- builtin_function,
- left,
- ARRAY_SIZE(builtin_args),
- builtin_args,
- &caught_exception);
- if (caught_exception) {
- return Failure::Exception();
- }
- return *result;
+ BinaryOpIC ic(isolate);
+ return ic.Transition(left, right);
}
@@ -2898,6 +2705,47 @@
}
+Builtins::JavaScript BinaryOpIC::TokenToJSBuiltin(Token::Value op) {
+ switch (op) {
+ default:
+ UNREACHABLE();
+ case Token::ADD:
+ return Builtins::ADD;
+ case Token::SUB:
+ return Builtins::SUB;
+ case Token::MUL:
+ return Builtins::MUL;
+ case Token::DIV:
+ return Builtins::DIV;
+ case Token::MOD:
+ return Builtins::MOD;
+ case Token::BIT_OR:
+ return Builtins::BIT_OR;
+ case Token::BIT_AND:
+ return Builtins::BIT_AND;
+ case Token::BIT_XOR:
+ return Builtins::BIT_XOR;
+ case Token::SAR:
+ return Builtins::SAR;
+ case Token::SHR:
+ return Builtins::SHR;
+ case Token::SHL:
+ return Builtins::SHL;
+ }
+}
+
+
MaybeObject* ToBooleanIC::ToBoolean(Handle<Object> object,
Code::ExtraICState extra_ic_state) {
ToBooleanStub stub(extra_ic_state);
diff --git a/src/ic.h b/src/ic.h
index 35a3107..ebea083 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -57,8 +57,8 @@
ICU(LoadPropertyWithInterceptorForCall) \
ICU(KeyedLoadPropertyWithInterceptor) \
ICU(StoreInterceptorProperty) \
- ICU(BinaryOp_Patch) \
ICU(CompareIC_Miss) \
+ ICU(BinaryOpIC_Miss) \
ICU(CompareNilIC_Miss) \
ICU(Unreachable) \
ICU(ToBooleanIC_Miss)
@@ -736,22 +736,14 @@
GENERIC
};
- static void StubInfoToType(int minor_key,
- Handle<Type>* left,
- Handle<Type>* right,
- Handle<Type>* result,
- Isolate* isolate);
+ explicit BinaryOpIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) { }
- explicit BinaryOpIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
-
- void patch(Code* code);
+ static Builtins::JavaScript TokenToJSBuiltin(Token::Value op);
static const char* GetName(TypeInfo type_info);
- static State ToState(TypeInfo type_info);
-
- private:
- static Handle<Type> TypeInfoToType(TypeInfo binary_type, Isolate* isolate);
+ MUST_USE_RESULT MaybeObject* Transition(Handle<Object> left,
+ Handle<Object> right);
};
@@ -858,6 +850,7 @@
DECLARE_RUNTIME_FUNCTION(MaybeObject*, UnaryOpIC_Miss);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreIC_MissFromStubFailure);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, BinaryOpIC_Miss);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, CompareNilIC_Miss);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, ToBooleanIC_Miss);
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
index df0f14a..49936d7 100644
--- a/src/incremental-marking.cc
+++ b/src/incremental-marking.cc
@@ -648,6 +648,8 @@
IncrementalMarkingRootMarkingVisitor visitor(this);
heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
+ heap_->mark_compact_collector()->MarkWeakObjectToCodeTable();
+
// Ready to start incremental marking.
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Running\n");
diff --git a/src/isolate.cc b/src/isolate.cc
index 90bcb0f..29ead29 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -2321,6 +2321,7 @@
DONT_TRACK_ALLOCATION_SITE, 0);
stub.InitializeInterfaceDescriptor(
this, code_stub_interface_descriptor(CodeStub::FastCloneShallowArray));
+ BinaryOpStub::InitializeForIsolate(this);
CompareNilICStub::InitializeForIsolate(this);
ToBooleanStub::InitializeForIsolate(this);
ArrayConstructorStubBase::InstallDescriptors(this);
diff --git a/src/log.cc b/src/log.cc
index 92555b9..0f36bfb 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -1610,7 +1610,12 @@
case Code::FUNCTION:
case Code::OPTIMIZED_FUNCTION:
return; // We log this later using LogCompiledFunctions.
- case Code::BINARY_OP_IC: // fall through
+ case Code::BINARY_OP_IC: {
+ BinaryOpStub stub(code_object->extended_extra_ic_state());
+ description = stub.GetName().Detach();
+ tag = Logger::STUB_TAG;
+ break;
+ }
case Code::COMPARE_IC: // fall through
case Code::COMPARE_NIL_IC: // fall through
case Code::TO_BOOLEAN_IC: // fall through
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index d9cb8dc..65d838c 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -91,10 +91,8 @@
void VisitEmbeddedPointer(RelocInfo* rinfo) {
ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- if (!FLAG_weak_embedded_maps_in_optimized_code || !FLAG_collect_maps ||
- rinfo->host()->kind() != Code::OPTIMIZED_FUNCTION ||
- !rinfo->target_object()->IsMap() ||
- !Map::cast(rinfo->target_object())->CanTransition()) {
+ if (!Code::IsWeakEmbeddedObject(rinfo->host()->kind(),
+ rinfo->target_object())) {
VisitPointer(rinfo->target_object_address());
}
}
@@ -433,9 +431,8 @@
#endif
#ifdef VERIFY_HEAP
- if (FLAG_collect_maps && FLAG_weak_embedded_maps_in_optimized_code &&
- heap()->weak_embedded_maps_verification_enabled()) {
- VerifyWeakEmbeddedMapsInOptimizedCode();
+ if (heap()->weak_embedded_objects_verification_enabled()) {
+ VerifyWeakEmbeddedObjectsInOptimizedCode();
}
if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
VerifyOmittedMapChecks();
@@ -501,7 +498,7 @@
}
-void MarkCompactCollector::VerifyWeakEmbeddedMapsInOptimizedCode() {
+void MarkCompactCollector::VerifyWeakEmbeddedObjectsInOptimizedCode() {
HeapObjectIterator code_iterator(heap()->code_space());
for (HeapObject* obj = code_iterator.Next();
obj != NULL;
@@ -509,7 +506,7 @@
Code* code = Code::cast(obj);
if (code->kind() != Code::OPTIMIZED_FUNCTION) continue;
if (WillBeDeoptimized(code)) continue;
- code->VerifyEmbeddedMapsDependency();
+ code->VerifyEmbeddedObjectsDependency();
}
}
@@ -1473,7 +1470,7 @@
// Mark the backing hash table without pushing it on the marking stack.
Object* table_object = weak_collection->table();
if (!table_object->IsHashTable()) return;
- ObjectHashTable* table = ObjectHashTable::cast(table_object);
+ WeakHashTable* table = WeakHashTable::cast(table_object);
Object** table_slot =
HeapObject::RawField(weak_collection, JSWeakCollection::kTableOffset);
MarkBit table_mark = Marking::MarkBitFrom(table);
@@ -2115,6 +2112,8 @@
// Handle the string table specially.
MarkStringTable(visitor);
+ MarkWeakObjectToCodeTable();
+
// There may be overflowed objects in the heap. Visit them now.
while (marking_deque_.overflowed()) {
RefillMarkingDeque();
@@ -2155,6 +2154,16 @@
}
+void MarkCompactCollector::MarkWeakObjectToCodeTable() {
+ HeapObject* weak_object_to_code_table =
+ HeapObject::cast(heap()->weak_object_to_code_table());
+ if (!IsMarked(weak_object_to_code_table)) {
+ MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table);
+ SetMark(weak_object_to_code_table, mark);
+ }
+}
+
+
// Mark all objects reachable from the objects on the marking stack.
// Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from the
@@ -2522,7 +2531,8 @@
if (map_mark.Get()) {
ClearNonLiveDependentCode(map->dependent_code());
} else {
- ClearAndDeoptimizeDependentCode(map);
+ ClearAndDeoptimizeDependentCode(map->dependent_code());
+ map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
}
}
@@ -2536,6 +2546,31 @@
ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
}
}
+
+ if (heap_->weak_object_to_code_table()->IsHashTable()) {
+ WeakHashTable* table =
+ WeakHashTable::cast(heap_->weak_object_to_code_table());
+ uint32_t capacity = table->Capacity();
+ for (uint32_t i = 0; i < capacity; i++) {
+ uint32_t key_index = table->EntryToIndex(i);
+ Object* key = table->get(key_index);
+ if (!table->IsKey(key)) continue;
+ uint32_t value_index = table->EntryToValueIndex(i);
+ Object* value = table->get(value_index);
+ if (IsMarked(key)) {
+ if (!IsMarked(value)) {
+ HeapObject* obj = HeapObject::cast(value);
+ MarkBit mark = Marking::MarkBitFrom(obj);
+ SetMark(obj, mark);
+ }
+ ClearNonLiveDependentCode(DependentCode::cast(value));
+ } else {
+ ClearAndDeoptimizeDependentCode(DependentCode::cast(value));
+ table->set(key_index, heap_->the_hole_value());
+ table->set(value_index, heap_->the_hole_value());
+ }
+ }
+ }
}
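The new loop gives weak_object_to_code_table weak-key semantics: a live key keeps its dependent code registered (pruning dead entries from it), while a dead key has both slots cleared and its dependent code deoptimized. A rough model over a standard map (illustrative only):

    #include <unordered_map>
    #include <vector>

    template <typename Object, typename Code>
    void SweepWeakObjectToCode(
        std::unordered_map<Object*, std::vector<Code*>>* table,
        bool (*is_marked)(Object*), void (*deoptimize)(Code*)) {
      for (auto it = table->begin(); it != table->end();) {
        if (is_marked(it->first)) {
          ++it;  // key is live: its dependent code stays registered
        } else {
          for (Code* code : it->second) deoptimize(code);
          it = table->erase(it);  // like storing the_hole in both slots
        }
      }
    }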
@@ -2601,9 +2636,9 @@
}
-void MarkCompactCollector::ClearAndDeoptimizeDependentCode(Map* map) {
+void MarkCompactCollector::ClearAndDeoptimizeDependentCode(
+ DependentCode* entries) {
DisallowHeapAllocation no_allocation;
- DependentCode* entries = map->dependent_code();
DependentCode::GroupStartIndexes starts(entries);
int number_of_entries = starts.number_of_entries();
if (number_of_entries == 0) return;
@@ -2619,7 +2654,6 @@
}
entries->clear_at(i);
}
- map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
}
@@ -3457,6 +3491,13 @@
updating_visitor.VisitPointer(heap_->native_contexts_list_address());
heap_->string_table()->Iterate(&updating_visitor);
+ updating_visitor.VisitPointer(heap_->weak_object_to_code_table_address());
+ if (heap_->weak_object_to_code_table()->IsHashTable()) {
+ WeakHashTable* table =
+ WeakHashTable::cast(heap_->weak_object_to_code_table());
+ table->Iterate(&updating_visitor);
+ table->Rehash(heap_->undefined_value());
+ }
// Update pointers from external string table.
heap_->UpdateReferencesInExternalStringTable(
diff --git a/src/mark-compact.h b/src/mark-compact.h
index 4474864..aea5e1c 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -637,7 +637,7 @@
void VerifyMarkbitsAreClean();
static void VerifyMarkbitsAreClean(PagedSpace* space);
static void VerifyMarkbitsAreClean(NewSpace* space);
- void VerifyWeakEmbeddedMapsInOptimizedCode();
+ void VerifyWeakEmbeddedObjectsInOptimizedCode();
void VerifyOmittedMapChecks();
#endif
@@ -735,6 +735,10 @@
return sequential_sweeping_;
}
+ // Mark the global table which maps weak objects to dependent code without
+ // marking its contents.
+ void MarkWeakObjectToCodeTable();
+
private:
MarkCompactCollector();
~MarkCompactCollector();
@@ -884,7 +888,7 @@
void ClearNonLivePrototypeTransitions(Map* map);
void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
- void ClearAndDeoptimizeDependentCode(Map* map);
+ void ClearAndDeoptimizeDependentCode(DependentCode* dependent_code);
void ClearNonLiveDependentCode(DependentCode* dependent_code);
// Marking detaches initial maps from SharedFunctionInfo objects
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 5cdfecc..3f59932 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -1227,958 +1227,18 @@
}
-// Generates code to call a C function to do a double operation.
-// This code never falls through, but returns with a heap number containing
-// the result in v0.
-// Register heap_number_result must be a heap number in which the
-// result of the operation will be stored.
-// Requires the following layout on entry:
-// a0: Left value (least significant part of mantissa).
-// a1: Left value (sign, exponent, top of mantissa).
-// a2: Right value (least significant part of mantissa).
-// a3: Right value (sign, exponent, top of mantissa).
-static void CallCCodeForDoubleOperation(MacroAssembler* masm,
- Token::Value op,
- Register heap_number_result,
- Register scratch) {
- // Assert that heap_number_result is saved.
- // We currently always use s0 to pass it.
- ASSERT(heap_number_result.is(s0));
-
- // Push the current return address before the C call.
- __ push(ra);
- __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
- }
- // Store answer in the overwritable heap number.
- // Double returned in register f0.
- __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
- // Place heap_number_result in v0 and return to the pushed return address.
- __ pop(ra);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, heap_number_result);
+void BinaryOpStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a1, a0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
}
-void BinaryOpStub::Initialize() {
- platform_specific_bit_ = true; // FPU is a base requirement for V8.
-}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- Label get_result;
-
- __ Push(a1, a0);
-
- __ li(a2, Operand(Smi::FromInt(MinorKey())));
- __ push(a2);
-
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
- MacroAssembler* masm) {
- UNIMPLEMENTED();
-}
-
-
-void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
- Token::Value op) {
- Register left = a1;
- Register right = a0;
-
- Register scratch1 = t0;
- Register scratch2 = t1;
-
- ASSERT(right.is(a0));
- STATIC_ASSERT(kSmiTag == 0);
-
- Label not_smi_result;
- switch (op) {
- case Token::ADD:
- __ AdduAndCheckForOverflow(v0, left, right, scratch1);
- __ RetOnNoOverflow(scratch1);
- // No need to revert anything - right and left are intact.
- break;
- case Token::SUB:
- __ SubuAndCheckForOverflow(v0, left, right, scratch1);
- __ RetOnNoOverflow(scratch1);
- // No need to revert anything - right and left are intact.
- break;
- case Token::MUL: {
- // Remove tag from one of the operands. This way the multiplication result
- // will be a smi if it fits the smi range.
- __ SmiUntag(scratch1, right);
- // Do multiplication.
- // lo = lower 32 bits of scratch1 * left.
- // hi = higher 32 bits of scratch1 * left.
- __ Mult(left, scratch1);
- // Check for overflowing the smi range - no overflow if higher 33 bits of
- // the result are identical.
- __ mflo(scratch1);
- __ mfhi(scratch2);
- __ sra(scratch1, scratch1, 31);
- __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2));
- // Go slow on zero result to handle -0.
- __ mflo(v0);
- __ Ret(ne, v0, Operand(zero_reg));
- // We need -0 if we were multiplying a negative number with 0 to get 0.
- // We know one of them was zero.
- __ Addu(scratch2, right, left);
- Label skip;
- // ARM uses the 'pl' condition, which is 'ge'.
- // Negating it results in 'lt'.
- __ Branch(&skip, lt, scratch2, Operand(zero_reg));
- ASSERT(Smi::FromInt(0) == 0);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, zero_reg); // Return smi 0 if the non-zero one was positive.
- __ bind(&skip);
- // We fall through here if we multiplied a negative number with 0, because
- // that would mean we should produce -0.
- }
- break;
- case Token::DIV: {
- Label done;
- __ SmiUntag(scratch2, right);
- __ SmiUntag(scratch1, left);
- __ Div(scratch1, scratch2);
- // A minor optimization: div may be calculated asynchronously, so we check
- // for division by zero before getting the result.
- __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
- // If the result is 0, we need to make sure the divisor (right) is
- // positive, otherwise it is a -0 case.
- // Quotient is in 'lo', remainder is in 'hi'.
- // Check for no remainder first.
- __ mfhi(scratch1);
- __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
- __ mflo(scratch1);
- __ Branch(&done, ne, scratch1, Operand(zero_reg));
- __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
- __ bind(&done);
- // Check that the signed result fits in a Smi.
- __ Addu(scratch2, scratch1, Operand(0x40000000));
- __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
- __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
- __ SmiTag(v0, scratch1);
- }
- break;
- case Token::MOD: {
- Label done;
- __ SmiUntag(scratch2, right);
- __ SmiUntag(scratch1, left);
- __ Div(scratch1, scratch2);
- // A minor optimization: div may be calculated asynchronously, so we check
- // for division by 0 before calling mfhi.
- // Check for zero on the right hand side.
- __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
- // If the result is 0, we need to make sure the dividend (left) is
- // positive (or 0), otherwise it is a -0 case.
- // Remainder is in 'hi'.
- __ mfhi(scratch2);
- __ Branch(&done, ne, scratch2, Operand(zero_reg));
- __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
- __ bind(&done);
- // Check that the signed result fits in a Smi.
- __ Addu(scratch1, scratch2, Operand(0x40000000));
- __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
- __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
- __ SmiTag(v0, scratch2);
- }
- break;
- case Token::BIT_OR:
- __ Ret(USE_DELAY_SLOT);
- __ or_(v0, left, right);
- break;
- case Token::BIT_AND:
- __ Ret(USE_DELAY_SLOT);
- __ and_(v0, left, right);
- break;
- case Token::BIT_XOR:
- __ Ret(USE_DELAY_SLOT);
- __ xor_(v0, left, right);
- break;
- case Token::SAR:
- // Remove tags from right operand.
- __ GetLeastBitsFromSmi(scratch1, right, 5);
- __ srav(scratch1, left, scratch1);
- // Smi tag result.
- __ And(v0, scratch1, ~kSmiTagMask);
- __ Ret();
- break;
- case Token::SHR:
- // Remove tags from operands. We can't do this on a 31 bit number
- // because then the 0s get shifted into bit 30 instead of bit 31.
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ srlv(v0, scratch1, scratch2);
- // Unsigned shift is not allowed to produce a negative number, so
- // check the sign bit and the sign bit after Smi tagging.
- __ And(scratch1, v0, Operand(0xc0000000));
- __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
- // Smi tag result.
- __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
- __ SmiTag(v0);
- break;
- case Token::SHL:
- // Remove tags from operands.
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ sllv(scratch1, scratch1, scratch2);
- // Check that the signed result fits in a Smi.
- __ Addu(scratch2, scratch1, Operand(0x40000000));
- __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
- __ Ret(USE_DELAY_SLOT);
- __ SmiTag(v0, scratch1); // SmiTag emits one instruction in delay slot.
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&not_smi_result);
-}
-
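The mflo/mfhi comparison above is the standard MIPS test for 32-bit multiply overflow: the 64-bit product fits in 32 bits exactly when the hi word equals the sign extension of the lo word. In C++ (arithmetic right shift assumed, matching the sra above):

    #include <cstdint>

    static bool MulFitsIn32Bits(int32_t a, int32_t b) {
      int64_t product = static_cast<int64_t>(a) * b;     // what Mult computes
      int32_t lo = static_cast<int32_t>(product);        // mflo
      int32_t hi = static_cast<int32_t>(product >> 32);  // mfhi
      return hi == (lo >> 31);  // hi must replicate lo's sign bit
    }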
-
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- OverwriteMode mode);
-
-
-void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required,
- Label* miss,
- Token::Value op,
- OverwriteMode mode) {
- Register left = a1;
- Register right = a0;
- Register scratch1 = t3;
- Register scratch2 = t5;
-
- ASSERT(smi_operands || (not_numbers != NULL));
- if (smi_operands) {
- __ AssertSmi(left);
- __ AssertSmi(right);
- }
- if (left_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(left, miss);
- }
- if (right_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(right, miss);
- }
-
- Register heap_number_map = t2;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- switch (op) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- // Allocate new heap number for result.
- Register result = s0;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
-
- // Load left and right operands into f12 and f14.
- if (smi_operands) {
- __ SmiUntag(scratch1, a0);
- __ mtc1(scratch1, f14);
- __ cvt_d_w(f14, f14);
- __ SmiUntag(scratch1, a1);
- __ mtc1(scratch1, f12);
- __ cvt_d_w(f12, f12);
- } else {
- // Load right operand to f14.
- if (right_type == BinaryOpIC::INT32) {
- __ LoadNumberAsInt32Double(
- right, f14, heap_number_map, scratch1, scratch2, f2, miss);
- } else {
- Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
- __ LoadNumber(right, f14, heap_number_map, scratch1, fail);
- }
- // Load left operand to f12 or a0/a1. This keeps a0/a1 intact if it
- // jumps to |miss|.
- if (left_type == BinaryOpIC::INT32) {
- __ LoadNumberAsInt32Double(
- left, f12, heap_number_map, scratch1, scratch2, f2, miss);
- } else {
- Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
- __ LoadNumber(left, f12, heap_number_map, scratch1, fail);
- }
- }
-
- // Calculate the result.
- if (op != Token::MOD) {
- // Using FPU registers:
- // f12: Left value.
- // f14: Right value.
- switch (op) {
- case Token::ADD:
- __ add_d(f10, f12, f14);
- break;
- case Token::SUB:
- __ sub_d(f10, f12, f14);
- break;
- case Token::MUL:
- __ mul_d(f10, f12, f14);
- break;
- case Token::DIV:
- __ div_d(f10, f12, f14);
- break;
- default:
- UNREACHABLE();
- }
-
- // ARM uses a workaround here because of the unaligned HeapNumber
- // kValueOffset. On MIPS this workaround is built into sdc1 so
- // there's no point in generating even more instructions.
- __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, result);
- } else {
- // Call the C function to handle the double operation.
- CallCCodeForDoubleOperation(masm, op, result, scratch1);
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
- }
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- if (smi_operands) {
- __ SmiUntag(a3, left);
- __ SmiUntag(a2, right);
- } else {
- // Convert operands to 32-bit integers. Right in a2 and left in a3.
- __ TruncateNumberToI(left, a3, heap_number_map, scratch1, not_numbers);
- __ TruncateNumberToI(right, a2, heap_number_map, scratch1, not_numbers);
- }
- Label result_not_a_smi;
- switch (op) {
- case Token::BIT_OR:
- __ Or(a2, a3, Operand(a2));
- break;
- case Token::BIT_XOR:
- __ Xor(a2, a3, Operand(a2));
- break;
- case Token::BIT_AND:
- __ And(a2, a3, Operand(a2));
- break;
- case Token::SAR:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(a2, a2, 5);
- __ srav(a2, a3, a2);
- break;
- case Token::SHR:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(a2, a2, 5);
- __ srlv(a2, a3, a2);
- // SHR is special because it is required to produce a positive answer.
- // The code below for writing into heap numbers isn't capable of
- // writing the register as an unsigned int so we go to slow case if we
- // hit this case.
- __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
- break;
- case Token::SHL:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(a2, a2, 5);
- __ sllv(a2, a3, a2);
- break;
- default:
- UNREACHABLE();
- }
- // Check that the *signed* result fits in a smi.
- __ Addu(a3, a2, Operand(0x40000000));
- __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
- __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
- __ SmiTag(v0, a2);
-
- // Allocate new heap number for result.
- __ bind(&result_not_a_smi);
- Register result = t1;
- if (smi_operands) {
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- } else {
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required,
- mode);
- }
-
- // a2: Answer as signed int32.
- // t1: Heap number to write answer into.
-
- // Nothing can go wrong now, so move the heap number to v0, which is the
- // result.
- __ mov(v0, t1);
- // Convert the int32 in a2 to the heap number in v0. As mentioned
- // above, SHR always needs to produce a positive result.
- __ mtc1(a2, f0);
- if (op == Token::SHR) {
- __ Cvt_d_uw(f0, f0, f22);
- } else {
- __ cvt_d_w(f0, f0);
- }
- // ARM uses a workaround here because of the unaligned HeapNumber
- // kValueOffset. On MIPS this workaround is built into sdc1, so
- // there's no point in generating even more instructions.
- __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
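// [Editorial sketch, not part of the original patch] The smi-range check
// emitted above ("Addu(a3, a2, Operand(0x40000000))" plus a branch on a
// negative result) relies on two's-complement overflow: 32-bit smis hold
// signed values in [-2^30, 2^30), so after adding 0x40000000 the sign bit is
// set exactly when the value is out of range. An equivalent C++ form:
static inline bool FitsInSmiRange(int32_t value) {
  uint32_t biased = static_cast<uint32_t>(value) + 0x40000000u;
  return static_cast<int32_t>(biased) >= 0;  // negative iff out of smi range
}
// The SHR path stores through Cvt_d_uw because a logical shift can yield a
// value above INT32_MAX, which must be read back as an unsigned integer.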
-// Generate the smi code. If the operation on smis is successful, a return is
-// generated. If the result is not a smi and heap number allocation is not
-// requested, the code falls through. If number allocation is requested but a
-// heap number cannot be allocated, the code jumps to the label gc_required.
-void BinaryOpStub_GenerateSmiCode(
- MacroAssembler* masm,
- Label* use_runtime,
- Label* gc_required,
- Token::Value op,
- BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
- OverwriteMode mode) {
- Label not_smis;
-
- Register left = a1;
- Register right = a0;
- Register scratch1 = t3;
-
- // Perform combined smi check on both operands.
- __ Or(scratch1, left, Operand(right));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(scratch1, &not_smis);
-
- // If the smi-smi operation results in a smi, a return is generated.
- BinaryOpStub_GenerateSmiSmiOperation(masm, op);
-
- // If heap number results are possible generate the result in an allocated
- // heap number.
- if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
- BinaryOpStub_GenerateFPOperation(
- masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
- use_runtime, gc_required, &not_smis, op, mode);
- }
- __ bind(&not_smis);
-}
-
-
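// [Editorial sketch] The combined smi check above works because smis carry a
// zero tag bit (kSmiTag == 0), so OR-ing both raw operand words and testing
// the low bit of the result checks them in a single branch. A hypothetical
// standalone form, assuming a 1-bit smi tag as on 32-bit MIPS:
static inline bool BothAreSmis(intptr_t raw_left, intptr_t raw_right) {
  const intptr_t kSmiTagMask = 1;
  return ((raw_left | raw_right) & kSmiTagMask) == 0;
}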
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label right_arg_changed, call_runtime;
-
- if (op_ == Token::MOD && encoded_right_arg_.has_value) {
- // It is guaranteed that the value will fit into a Smi, because if it
- // didn't, we wouldn't be here; see BinaryOp_Patch.
- __ Branch(&right_arg_changed,
- ne,
- a0,
- Operand(Smi::FromInt(fixed_right_arg_value())));
- }
-
- if (result_type_ == BinaryOpIC::UNINITIALIZED ||
- result_type_ == BinaryOpIC::SMI) {
- // Only allow smi results.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_);
- } else {
- // Allow heap number result and don't make a transition if a heap number
- // cannot be allocated.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS,
- mode_);
- }
-
- // Code falls through if the result is not returned as either a smi or heap
- // number.
- __ bind(&right_arg_changed);
- GenerateTypeTransition(masm);
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // If both arguments are strings, call the string add stub.
- // Otherwise, do a transition.
-
- // Registers containing left and right operands respectively.
- Register left = a1;
- Register right = a0;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &call_runtime);
- __ GetObjectType(left, a2, a2);
- __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
-
- // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime);
- __ GetObjectType(right, a2, a2);
- __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
-
- StringAddStub string_add_stub(
- (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&call_runtime);
- GenerateTypeTransition(masm);
-}
-
-
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
-
- Register left = a1;
- Register right = a0;
- Register scratch1 = t3;
- Register scratch2 = t5;
- FPURegister double_scratch = f0;
- FPURegister single_scratch = f6;
-
- Register heap_number_result = no_reg;
- Register heap_number_map = t2;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- Label call_runtime;
- // Labels for type transition, used for wrong input or output types.
- // Both labels are currently bound to the same position. We use two
- // different labels to differentiate the causes leading to a type transition.
- Label transition;
-
- // Smi-smi fast case.
- Label skip;
- __ Or(scratch1, left, right);
- __ JumpIfNotSmi(scratch1, &skip);
- BinaryOpStub_GenerateSmiSmiOperation(masm, op_);
- // Fall through if the result is not a smi.
- __ bind(&skip);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(left, &transition);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(right, &transition);
- }
- // Load both operands and check that they are 32-bit integers.
- // Jump to type transition if they are not. The registers a0 and a1 (right
- // and left) are preserved for the runtime call.
-
- __ LoadNumberAsInt32Double(
- right, f14, heap_number_map, scratch1, scratch2, f2, &transition);
- __ LoadNumberAsInt32Double(
- left, f12, heap_number_map, scratch1, scratch2, f2, &transition);
-
- if (op_ != Token::MOD) {
- Label return_heap_number;
- switch (op_) {
- case Token::ADD:
- __ add_d(f10, f12, f14);
- break;
- case Token::SUB:
- __ sub_d(f10, f12, f14);
- break;
- case Token::MUL:
- __ mul_d(f10, f12, f14);
- break;
- case Token::DIV:
- __ div_d(f10, f12, f14);
- break;
- default:
- UNREACHABLE();
- }
-
- if (result_type_ <= BinaryOpIC::INT32) {
- Register except_flag = scratch2;
- const FPURoundingMode kRoundingMode = op_ == Token::DIV ?
- kRoundToMinusInf : kRoundToZero;
- const CheckForInexactConversion kConversion = op_ == Token::DIV ?
- kCheckForInexactConversion : kDontCheckForInexactConversion;
- __ EmitFPUTruncate(kRoundingMode,
- scratch1,
- f10,
- at,
- f16,
- except_flag,
- kConversion);
- // If except_flag != 0, result does not fit in a 32-bit integer.
- __ Branch(&transition, ne, except_flag, Operand(zero_reg));
- // Try to tag the result as a Smi, return heap number on overflow.
- __ SmiTagCheckOverflow(scratch1, scratch1, scratch2);
- __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
- // Check for minus zero, transition in that case (because we need
- // to return a heap number).
- Label not_zero;
- ASSERT(kSmiTag == 0);
- __ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
- __ mfc1(scratch2, f11);
- __ And(scratch2, scratch2, HeapNumber::kSignMask);
- __ Branch(&transition, ne, scratch2, Operand(zero_reg));
- __ bind(&not_zero);
-
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, scratch1);
- }
-
- __ bind(&return_heap_number);
- // Return a heap number, or fall through to type transition or runtime
- // call if we can't.
- // We are using FPU registers so s0 is available.
- heap_number_result = s0;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime,
- mode_);
- __ sdc1(f10,
- FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, heap_number_result);
-
- // A DIV operation expecting an integer result falls through
- // to type transition.
-
- } else {
- if (encoded_right_arg_.has_value) {
- __ Move(f16, fixed_right_arg_value());
- __ BranchF(&transition, NULL, ne, f14, f16);
- }
-
- Label pop_and_call_runtime;
-
- // Allocate a heap number to store the result.
- heap_number_result = s0;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &pop_and_call_runtime,
- mode_);
-
- // Call the C function to handle the double operation.
- CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1);
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
-
- __ bind(&pop_and_call_runtime);
- __ Drop(2);
- __ Branch(&call_runtime);
- }
-
- break;
- }
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- Label return_heap_number;
- // Convert operands to 32-bit integers. Right in a2 and left in a3. The
- // registers a0 and a1 (right and left) are preserved for the runtime
- // call.
- __ LoadNumberAsInt32(
- left, a3, heap_number_map, scratch1, scratch2, f0, f2, &transition);
- __ LoadNumberAsInt32(
- right, a2, heap_number_map, scratch1, scratch2, f0, f2, &transition);
-
- // The ECMA-262 standard specifies that, for shift operations, only the
- // 5 least significant bits of the shift value should be used.
- switch (op_) {
- case Token::BIT_OR:
- __ Or(a2, a3, Operand(a2));
- break;
- case Token::BIT_XOR:
- __ Xor(a2, a3, Operand(a2));
- break;
- case Token::BIT_AND:
- __ And(a2, a3, Operand(a2));
- break;
- case Token::SAR:
- __ And(a2, a2, Operand(0x1f));
- __ srav(a2, a3, a2);
- break;
- case Token::SHR:
- __ And(a2, a2, Operand(0x1f));
- __ srlv(a2, a3, a2);
- // SHR is special because it is required to produce a positive answer.
- // We only get a negative result if the shift value (a2) is 0.
- // This result cannot be represented as a signed 32-bit integer; try
- // to return a heap number if we can.
- __ Branch((result_type_ <= BinaryOpIC::INT32)
- ? &transition
- : &return_heap_number,
- lt,
- a2,
- Operand(zero_reg));
- break;
- case Token::SHL:
- __ And(a2, a2, Operand(0x1f));
- __ sllv(a2, a3, a2);
- break;
- default:
- UNREACHABLE();
- }
-
- // Check if the result fits in a smi.
- __ Addu(scratch1, a2, Operand(0x40000000));
- // If not, try to return a heap number. (We know the result is an int32.)
- __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
- // Tag the result and return.
- __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
- __ SmiTag(v0, a2);
-
- __ bind(&return_heap_number);
- heap_number_result = t1;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime,
- mode_);
-
- if (op_ != Token::SHR) {
- // Convert the result to a floating point value.
- __ mtc1(a2, double_scratch);
- __ cvt_d_w(double_scratch, double_scratch);
- } else {
- // The result must be interpreted as an unsigned 32-bit integer.
- __ mtc1(a2, double_scratch);
- __ Cvt_d_uw(double_scratch, double_scratch, single_scratch);
- }
-
- // Store the result.
- __ sdc1(double_scratch,
- FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, heap_number_result);
-
- break;
- }
-
- default:
- UNREACHABLE();
- }
-
- // We never expect DIV to yield an integer result, so we always generate
- // type transition code for DIV operations expecting an integer result: the
- // code will fall through to this type transition.
- if (transition.is_linked() ||
- ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
- __ bind(&transition);
- GenerateTypeTransition(masm);
- }
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
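// [Editorial sketch] The "And(a2, a2, Operand(0x1f))" sequences above
// implement the ECMA-262 rule that shift counts are taken modulo 32. SHR is
// the one operator with an unsigned result, which is why a zero shift of a
// negative input cannot be smi-tagged. Illustrative C++, assuming arithmetic
// right shift for signed values as on the supported compilers:
static inline int32_t JsSar(int32_t x, int32_t count) {
  return x >> (count & 0x1f);
}
static inline uint32_t JsShr(uint32_t x, int32_t count) {
  return x >> (count & 0x1f);  // JsShr(0x80000000u, 0) == 2147483648u
}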
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- Label call_runtime;
-
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateAddStrings(masm);
- }
-
- // Convert oddball arguments to numbers.
- Label check, done;
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
- __ Branch(&check, ne, a1, Operand(t0));
- if (Token::IsBitOp(op_)) {
- __ li(a1, Operand(Smi::FromInt(0)));
- } else {
- __ LoadRoot(a1, Heap::kNanValueRootIndex);
- }
- __ jmp(&done);
- __ bind(&check);
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
- __ Branch(&done, ne, a0, Operand(t0));
- if (Token::IsBitOp(op_)) {
- __ li(a0, Operand(Smi::FromInt(0)));
- } else {
- __ LoadRoot(a0, Heap::kNanValueRootIndex);
- }
- __ bind(&done);
-
- GenerateNumberStub(masm);
-}
-
-
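// [Editorial sketch] Only undefined needs special-casing above: ToNumber
// turns it into NaN, and the bitwise operators would then truncate NaN to 0,
// so the stub preloads a smi 0 for bit ops and the NaN root otherwise. The
// equivalent value-level rule, with a hypothetical helper name:
#include <limits>
static inline double UndefinedOperandAsNumber(bool is_bit_op) {
  return is_bit_op ? 0.0 : std::numeric_limits<double>::quiet_NaN();
}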
-void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- Label call_runtime, transition;
- BinaryOpStub_GenerateFPOperation(
- masm, left_type_, right_type_, false,
- &transition, &call_runtime, &transition, op_, mode_);
-
- __ bind(&transition);
- GenerateTypeTransition(masm);
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime, transition;
-
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_);
-
- BinaryOpStub_GenerateFPOperation(
- masm, left_type_, right_type_, false,
- &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_);
-
- __ bind(&transition);
- GenerateTypeTransition(masm);
-
- __ bind(&call_string_add_or_runtime);
- if (op_ == Token::ADD) {
- GenerateAddStrings(masm);
- }
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- Label left_not_string, call_runtime;
-
- Register left = a1;
- Register right = a0;
-
- // Check if left argument is a string.
- __ JumpIfSmi(left, &left_not_string);
- __ GetObjectType(left, a2, a2);
- __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
-
- StringAddStub string_add_left_stub(
- (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime);
- __ GetObjectType(right, a2, a2);
- __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
-
- StringAddStub string_add_right_stub(
- (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // At least one argument is not a string.
- __ bind(&call_runtime);
-}
-
-
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- OverwriteMode mode) {
- // The code below will clobber result if allocation fails. To keep both
- // arguments intact for the runtime call, result cannot be one of them.
- ASSERT(!result.is(a0) && !result.is(a1));
-
- if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
- Label skip_allocation, allocated;
- Register overwritable_operand = mode == OVERWRITE_LEFT ? a1 : a0;
- // If the overwritable operand is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
- // Allocate a heap number for the result.
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- __ Branch(&allocated);
- __ bind(&skip_allocation);
- // Use object holding the overwritable operand for result.
- __ mov(result, overwritable_operand);
- __ bind(&allocated);
- } else {
- ASSERT(mode == NO_OVERWRITE);
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- }
-}
-
-
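// [Editorial sketch, hypothetical types] The overwrite optimization above in
// plain C++: if the stub may clobber one operand and that operand is already
// a HeapNumber (not a smi, which has no backing store), its storage is
// reused as the result; otherwise a fresh HeapNumber is allocated and GC may
// occur.
#include <cstddef>
struct TaggedValue { bool is_smi; void* heap_storage; };
static void* ResultStorage(TaggedValue* overwritable,
                           void* (*allocate_heap_number)()) {
  if (overwritable != NULL && !overwritable->is_smi) {
    return overwritable->heap_storage;  // reuse the operand in place
  }
  return allocate_heap_number();  // may bail out to gc_required in the stub
}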
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ Push(a1, a0);
-}
-
-
-
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Untagged case: double input in f4, double result goes
// into f4.
@@ -2648,6 +1708,7 @@
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ BinaryOpStub::GenerateAheadOfTime(isolate);
}
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index bbcf204..192bed5 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -807,26 +807,31 @@
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
ZoneList<Handle<Map> > maps(1, zone());
+ ZoneList<Handle<JSObject> > objects(1, zone());
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT &&
- it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- if (map->CanTransition()) {
+ if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
+ if (it.rinfo()->target_object()->IsMap()) {
+ Handle<Map> map(Map::cast(it.rinfo()->target_object()));
maps.Add(map, zone());
+ } else if (it.rinfo()->target_object()->IsJSObject()) {
+ Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
+ objects.Add(object, zone());
}
}
}
#ifdef VERIFY_HEAP
- // This disables verification of weak embedded maps after full GC.
+ // This disables verification of weak embedded objects after full GC.
// AddDependentCode can cause a GC, which would observe the state where
// this code is not yet in the depended code lists of the embedded maps.
- NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
+ NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
#endif
for (int i = 0; i < maps.length(); i++) {
maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
}
+ for (int i = 0; i < objects.length(); i++) {
+ AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
+ }
}
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index ad13d7f..caf0baf 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -665,16 +665,20 @@
}
-void Code::VerifyEmbeddedMapsDependency() {
+void Code::VerifyEmbeddedObjectsDependency() {
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT &&
- it.rinfo()->target_object()->IsMap()) {
- Map* map = Map::cast(it.rinfo()->target_object());
- if (map->CanTransition()) {
+ Object* obj = it.rinfo()->target_object();
+ if (IsWeakEmbeddedObject(kind(), obj)) {
+ if (obj->IsMap()) {
+ Map* map = Map::cast(obj);
CHECK(map->dependent_code()->Contains(
DependentCode::kWeaklyEmbeddedGroup, this));
+ } else if (obj->IsJSObject()) {
+ Object* raw_table = GetIsolate()->heap()->weak_object_to_code_table();
+ WeakHashTable* table = WeakHashTable::cast(raw_table);
+ CHECK(DependentCode::cast(table->Lookup(obj))->Contains(
+ DependentCode::kWeaklyEmbeddedGroup, this));
}
}
}
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 2d0068c..c16275b 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -5955,6 +5955,34 @@
}
+template <int entrysize>
+bool WeakHashTableShape<entrysize>::IsMatch(Object* key, Object* other) {
+ return key->SameValue(other);
+}
+
+
+template <int entrysize>
+uint32_t WeakHashTableShape<entrysize>::Hash(Object* key) {
+ intptr_t hash = reinterpret_cast<intptr_t>(key);
+ return (uint32_t)(hash & 0xFFFFFFFF);
+}
+
+
+template <int entrysize>
+uint32_t WeakHashTableShape<entrysize>::HashForObject(Object* key,
+ Object* other) {
+ intptr_t hash = reinterpret_cast<intptr_t>(other);
+ return (uint32_t)(hash & 0xFFFFFFFF);
+}
+
+
+template <int entrysize>
+MaybeObject* WeakHashTableShape<entrysize>::AsObject(Heap* heap,
+ Object* key) {
+ return key;
+}
+
+
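// [Editorial note] WeakHashTableShape hashes keys by pointer identity (the
// address truncated to 32 bits), while IsMatch compares with SameValue,
// which is object identity for the heap objects this table stores. A
// freestanding equivalent of the hash, with a hypothetical name:
#include <stdint.h>
static inline uint32_t PointerIdentityHash(const void* key) {
  intptr_t raw = reinterpret_cast<intptr_t>(key);
  return static_cast<uint32_t>(raw & 0xFFFFFFFF);
}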
void Map::ClearCodeCache(Heap* heap) {
// No write barrier is needed since empty_fixed_array is not in new space.
// Please note this function is used during marking:
diff --git a/src/objects-visiting-inl.h b/src/objects-visiting-inl.h
index 46cc9d7..d5dd44e 100644
--- a/src/objects-visiting-inl.h
+++ b/src/objects-visiting-inl.h
@@ -261,10 +261,8 @@
ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
ASSERT(!rinfo->target_object()->IsConsString());
HeapObject* object = HeapObject::cast(rinfo->target_object());
- if (!FLAG_weak_embedded_maps_in_optimized_code || !FLAG_collect_maps ||
- rinfo->host()->kind() != Code::OPTIMIZED_FUNCTION ||
- !object->IsMap() || !Map::cast(object)->CanTransition()) {
- heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
+ heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
+ if (!Code::IsWeakEmbeddedObject(rinfo->host()->kind(), object)) {
StaticVisitor::MarkObject(heap, object);
}
}
diff --git a/src/objects.cc b/src/objects.cc
index d0880b7..d8a6f13 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -7732,11 +7732,11 @@
}
-MaybeObject* FixedArray::CopySize(int new_length) {
+MaybeObject* FixedArray::CopySize(int new_length, PretenureFlag pretenure) {
Heap* heap = GetHeap();
if (new_length == 0) return heap->empty_fixed_array();
Object* obj;
- { MaybeObject* maybe_obj = heap->AllocateFixedArray(new_length);
+ { MaybeObject* maybe_obj = heap->AllocateFixedArray(new_length, pretenure);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* result = FixedArray::cast(obj);
@@ -11022,6 +11022,22 @@
}
+bool Code::IsWeakEmbeddedObject(Kind kind, Object* object) {
+ if (kind != Code::OPTIMIZED_FUNCTION) return false;
+
+ if (object->IsMap()) {
+ return Map::cast(object)->CanTransition() &&
+ FLAG_collect_maps &&
+ FLAG_weak_embedded_maps_in_optimized_code;
+ }
+
+ if (object->IsJSObject()) {
+ return FLAG_weak_embedded_objects_in_optimized_code;
+ }
+
+ return false;
+}
+
MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
int capacity,
int length) {
@@ -11382,7 +11398,7 @@
int capacity = kCodesStartIndex + number_of_entries + 1;
if (capacity > 5) capacity = capacity * 5 / 4;
Handle<DependentCode> new_entries = Handle<DependentCode>::cast(
- factory->CopySizeFixedArray(entries, capacity));
+ factory->CopySizeFixedArray(entries, capacity, TENURED));
// The number of codes can change after GC.
starts.Recompute(*entries);
start = starts.at(group);
@@ -13887,7 +13903,9 @@
template<typename Shape, typename Key>
-MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
+MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n,
+ Key key,
+ PretenureFlag pretenure) {
int capacity = Capacity();
int nof = NumberOfElements() + n;
int nod = NumberOfDeletedElements();
@@ -13900,14 +13918,14 @@
}
const int kMinCapacityForPretenure = 256;
- bool pretenure =
- (capacity > kMinCapacityForPretenure) && !GetHeap()->InNewSpace(this);
+ bool should_pretenure = pretenure == TENURED ||
+ ((capacity > kMinCapacityForPretenure) && !GetHeap()->InNewSpace(this));
Object* obj;
{ MaybeObject* maybe_obj =
Allocate(GetHeap(),
nof * 2,
USE_DEFAULT_MINIMUM_CAPACITY,
- pretenure ? TENURED : NOT_TENURED);
+ should_pretenure ? TENURED : NOT_TENURED);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
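// [Editorial sketch] The pretenuring decision above, isolated: an explicit
// TENURED request (as the weak object-to-code table now passes) forces
// old-space allocation; otherwise the existing size/space heuristic applies.
static inline bool ShouldPretenure(bool tenured_requested, int capacity,
                                   bool table_is_in_new_space) {
  const int kMinCapacityForPretenure = 256;
  return tenured_requested ||
         (capacity > kMinCapacityForPretenure && !table_is_in_new_space);
}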
@@ -13975,6 +13993,8 @@
template class HashTable<ObjectHashTableShape<2>, Object*>;
+template class HashTable<WeakHashTableShape<2>, Object*>;
+
template class Dictionary<NameDictionaryShape, Name*>;
template class Dictionary<SeededNumberDictionaryShape, uint32_t>;
@@ -15617,6 +15637,41 @@
}
+Object* WeakHashTable::Lookup(Object* key) {
+ ASSERT(IsKey(key));
+ int entry = FindEntry(key);
+ if (entry == kNotFound) return GetHeap()->the_hole_value();
+ return get(EntryToValueIndex(entry));
+}
+
+
+MaybeObject* WeakHashTable::Put(Object* key, Object* value) {
+ ASSERT(IsKey(key));
+ int entry = FindEntry(key);
+ // Key is already in table, just overwrite value.
+ if (entry != kNotFound) {
+ set(EntryToValueIndex(entry), value);
+ return this;
+ }
+
+ // Check whether the hash table should be extended.
+ Object* obj;
+ { MaybeObject* maybe_obj = EnsureCapacity(1, key, TENURED);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
+ WeakHashTable* table = WeakHashTable::cast(obj);
+ table->AddEntry(table->FindInsertionEntry(Hash(key)), key, value);
+ return table;
+}
+
+
+void WeakHashTable::AddEntry(int entry, Object* key, Object* value) {
+ set(EntryToIndex(entry), key);
+ set(EntryToValueIndex(entry), value);
+ ElementAdded();
+}
+
+
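// [Editorial note] Intended usage of the table above (a sketch that elides
// handles and error plumbing): Put may reallocate, so callers must adopt the
// returned table, and Lookup yields the hole value for keys never added.
//
//   MaybeObject* maybe = table->Put(embedded_object, dependent_code);
//   if (!maybe->To(&table)) return maybe;            // allocation failed
//   Object* codes = table->Lookup(embedded_object);  // hole if absent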
DeclaredAccessorDescriptorIterator::DeclaredAccessorDescriptorIterator(
DeclaredAccessorDescriptor* descriptor)
: array_(descriptor->serialized_data()->GetDataStartAddress()),
diff --git a/src/objects.h b/src/objects.h
index aa1f5c8..4d8a45b 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1044,7 +1044,8 @@
V(AccessCheckNeeded) \
V(Cell) \
V(PropertyCell) \
- V(ObjectHashTable)
+ V(ObjectHashTable) \
+ V(WeakHashTable)
#define ERROR_MESSAGES_LIST(V) \
@@ -2891,7 +2892,8 @@
// Copy operations.
MUST_USE_RESULT inline MaybeObject* Copy();
- MUST_USE_RESULT MaybeObject* CopySize(int new_length);
+ MUST_USE_RESULT MaybeObject* CopySize(int new_length,
+ PretenureFlag pretenure = NOT_TENURED);
// Add the elements of a JSArray to this FixedArray.
MUST_USE_RESULT MaybeObject* AddKeysFromJSArray(JSArray* array);
@@ -3526,7 +3528,10 @@
MUST_USE_RESULT MaybeObject* Shrink(Key key);
// Ensure enough space for n additional elements.
- MUST_USE_RESULT MaybeObject* EnsureCapacity(int n, Key key);
+ MUST_USE_RESULT MaybeObject* EnsureCapacity(
+ int n,
+ Key key,
+ PretenureFlag pretenure = NOT_TENURED);
};
@@ -3965,6 +3970,49 @@
};
+template <int entrysize>
+class WeakHashTableShape : public BaseShape<Object*> {
+ public:
+ static inline bool IsMatch(Object* key, Object* other);
+ static inline uint32_t Hash(Object* key);
+ static inline uint32_t HashForObject(Object* key, Object* object);
+ MUST_USE_RESULT static inline MaybeObject* AsObject(Heap* heap,
+ Object* key);
+ static const int kPrefixSize = 0;
+ static const int kEntrySize = entrysize;
+};
+
+
+// WeakHashTable maps keys that are arbitrary objects to object values.
+// It is used for the global weak hash table that maps objects
+// embedded in optimized code to dependent code lists.
+class WeakHashTable: public HashTable<WeakHashTableShape<2>, Object*> {
+ public:
+ static inline WeakHashTable* cast(Object* obj) {
+ ASSERT(obj->IsHashTable());
+ return reinterpret_cast<WeakHashTable*>(obj);
+ }
+
+ // Looks up the value associated with the given key. The hole value is
+ // returned in case the key is not present.
+ Object* Lookup(Object* key);
+
+ // Adds (or overwrites) the value associated with the given key. Mapping a
+ // key to the hole value causes removal of the whole entry.
+ MUST_USE_RESULT MaybeObject* Put(Object* key, Object* value);
+
+ private:
+ friend class MarkCompactCollector;
+
+ void AddEntry(int entry, Object* key, Object* value);
+
+ // Returns the index to the value of an entry.
+ static inline int EntryToValueIndex(int entry) {
+ return EntryToIndex(entry) + 1;
+ }
+};
+
+
// JSFunctionResultCache caches results of some JSFunction invocation.
// It is a fixed array with fixed structure:
// [0]: factory function
@@ -4878,7 +4926,8 @@
// TODO(danno): This is a bit of a hack right now since there are still
// clients of this API that pass "extra" values in for argc. These clients
// should be retrofitted to use ExtendedExtraICState.
- return kind == COMPARE_NIL_IC || kind == TO_BOOLEAN_IC;
+ return kind == COMPARE_NIL_IC || kind == TO_BOOLEAN_IC ||
+ kind == BINARY_OP_IC;
}
inline StubType type(); // Only valid for monomorphic IC stubs.
@@ -5144,9 +5193,11 @@
bool CanDeoptAt(Address pc);
#ifdef VERIFY_HEAP
- void VerifyEmbeddedMapsDependency();
+ void VerifyEmbeddedObjectsDependency();
#endif
+ static bool IsWeakEmbeddedObject(Kind kind, Object* object);
+
// Max loop nesting marker used to postpone OSR. We don't take loop
// nesting that is deeper than 5 levels into account.
static const int kMaxLoopNestingMarker = 6;
diff --git a/src/platform/elapsed-timer.h b/src/platform/elapsed-timer.h
index 9016a22..b61b007 100644
--- a/src/platform/elapsed-timer.h
+++ b/src/platform/elapsed-timer.h
@@ -28,8 +28,8 @@
#ifndef V8_PLATFORM_ELAPSED_TIMER_H_
#define V8_PLATFORM_ELAPSED_TIMER_H_
-#include "checks.h"
-#include "platform/time.h"
+#include "../checks.h"
+#include "time.h"
namespace v8 {
namespace internal {
diff --git a/src/platform/mutex.h b/src/platform/mutex.h
index 0f899ca..125e9d4 100644
--- a/src/platform/mutex.h
+++ b/src/platform/mutex.h
@@ -28,9 +28,9 @@
#ifndef V8_PLATFORM_MUTEX_H_
#define V8_PLATFORM_MUTEX_H_
-#include "lazy-instance.h"
+#include "../lazy-instance.h"
#if V8_OS_WIN
-#include "win32-headers.h"
+#include "../win32-headers.h"
#endif
#if V8_OS_POSIX
diff --git a/src/platform/semaphore.h b/src/platform/semaphore.h
index 2cfa142..0babe5f 100644
--- a/src/platform/semaphore.h
+++ b/src/platform/semaphore.h
@@ -28,9 +28,9 @@
#ifndef V8_PLATFORM_SEMAPHORE_H_
#define V8_PLATFORM_SEMAPHORE_H_
-#include "lazy-instance.h"
+#include "../lazy-instance.h"
#if V8_OS_WIN
-#include "win32-headers.h"
+#include "../win32-headers.h"
#endif
#if V8_OS_MACOSX
diff --git a/src/platform/time.h b/src/platform/time.h
index 25161f8..877e020 100644
--- a/src/platform/time.h
+++ b/src/platform/time.h
@@ -31,7 +31,7 @@
#include <ctime>
#include <limits>
-#include "allocation.h"
+#include "../allocation.h"
// Forward declarations.
extern "C" {
diff --git a/src/serialize.cc b/src/serialize.cc
index b3a7878..ef1461f 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -835,6 +835,8 @@
isolate_->heap()->undefined_value());
}
+ isolate_->heap()->InitializeWeakObjectToCodeTable();
+
// Update data pointers to the external strings containing natives sources.
for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
Object* source = isolate_->heap()->natives_source_cache()->get(i);
diff --git a/src/spaces.cc b/src/spaces.cc
index 2faf419..b7fa32d 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -1056,7 +1056,7 @@
int size = 0;
switch (identity()) {
case OLD_POINTER_SPACE:
- size = 64 * kPointerSize * KB;
+ size = 72 * kPointerSize * KB;
break;
case OLD_DATA_SPACE:
size = 192 * KB;
@@ -1077,7 +1077,14 @@
// upgraded to handle small pages.
size = AreaSize();
} else {
- size = 384 * KB;
+#if V8_TARGET_ARCH_MIPS
+ // On MIPS, code stubs seem to be quite a bit larger.
+ // TODO(olivf/MIPS folks): Can we do anything about this? Does it
+ // indicate the presence of a bug?
+ size = 464 * KB;
+#else
+ size = 416 * KB;
+#endif
}
break;
default:
@@ -2685,6 +2692,7 @@
// Try to expand the space and allocate in the new next page.
if (Expand()) {
+ ASSERT(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
return free_list_.Allocate(size_in_bytes);
}
diff --git a/src/type-info.cc b/src/type-info.cc
index dbd37f7..da4d183 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -381,20 +381,29 @@
Handle<Type>* left,
Handle<Type>* right,
Handle<Type>* result,
- Maybe<int>* fixed_right_arg) {
+ Maybe<int>* fixed_right_arg,
+ Token::Value operation) {
Handle<Object> object = GetInfo(id);
if (!object->IsCode()) {
- // For some binary ops we don't have ICs, e.g. Token::COMMA.
+ // For some binary ops we don't have ICs, e.g. Token::COMMA, but for the
+ // operations covered by the BinaryOpStub we should always have them.
+ ASSERT(!(operation >= BinaryOpStub::FIRST_TOKEN &&
+ operation <= BinaryOpStub::LAST_TOKEN));
*left = *right = *result = handle(Type::None(), isolate_);
return;
}
Handle<Code> code = Handle<Code>::cast(object);
ASSERT(code->is_binary_op_stub());
- int minor_key = code->stub_info();
- BinaryOpIC::StubInfoToType(minor_key, left, right, result, isolate());
- *fixed_right_arg =
- BinaryOpStub::decode_fixed_right_arg_from_minor_key(minor_key);
+ BinaryOpStub stub(code->extended_extra_ic_state());
+
+ // Sanity check.
+ ASSERT(stub.operation() == operation);
+
+ *left = stub.GetLeftType(isolate());
+ *right = stub.GetRightType(isolate());
+ *result = stub.GetResultType(isolate());
+ *fixed_right_arg = stub.fixed_right_arg();
}
@@ -410,36 +419,15 @@
}
-TypeInfo TypeFeedbackOracle::IncrementType(CountOperation* expr) {
+Handle<Type> TypeFeedbackOracle::IncrementType(CountOperation* expr) {
Handle<Object> object = GetInfo(expr->CountBinOpFeedbackId());
- TypeInfo unknown = TypeInfo::Unknown();
+ Handle<Type> unknown(Type::None(), isolate_);
if (!object->IsCode()) return unknown;
Handle<Code> code = Handle<Code>::cast(object);
if (!code->is_binary_op_stub()) return unknown;
- BinaryOpIC::TypeInfo left_type, right_type, unused_result_type;
- BinaryOpStub::decode_types_from_minor_key(code->stub_info(), &left_type,
- &right_type, &unused_result_type);
- // CountOperations should always have +1 or -1 as their right input.
- ASSERT(right_type == BinaryOpIC::SMI ||
- right_type == BinaryOpIC::UNINITIALIZED);
-
- switch (left_type) {
- case BinaryOpIC::UNINITIALIZED:
- case BinaryOpIC::SMI:
- return TypeInfo::Smi();
- case BinaryOpIC::INT32:
- return TypeInfo::Integer32();
- case BinaryOpIC::NUMBER:
- return TypeInfo::Double();
- case BinaryOpIC::STRING:
- case BinaryOpIC::GENERIC:
- return unknown;
- default:
- return unknown;
- }
- UNREACHABLE();
- return unknown;
+ BinaryOpStub stub(code->extended_extra_ic_state());
+ return stub.GetLeftType(isolate());
}
diff --git a/src/type-info.h b/src/type-info.h
index 4b376c8..7d7d7ea 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -301,7 +301,8 @@
Handle<Type>* left,
Handle<Type>* right,
Handle<Type>* result,
- Maybe<int>* fixed_right_arg);
+ Maybe<int>* fixed_right_arg,
+ Token::Value operation);
void CompareType(TypeFeedbackId id,
Handle<Type>* left,
@@ -310,7 +311,7 @@
Handle<Type> ClauseType(TypeFeedbackId id);
- TypeInfo IncrementType(CountOperation* expr);
+ Handle<Type> IncrementType(CountOperation* expr);
Zone* zone() const { return zone_; }
Isolate* isolate() const { return isolate_; }
diff --git a/src/typing.cc b/src/typing.cc
index 3fd1ce5..e9da680 100644
--- a/src/typing.cc
+++ b/src/typing.cc
@@ -543,7 +543,7 @@
Handle<Type> type, left_type, right_type;
Maybe<int> fixed_right_arg;
oracle()->BinaryType(expr->BinaryOperationFeedbackId(),
- &left_type, &right_type, &type, &fixed_right_arg);
+ &left_type, &right_type, &type, &fixed_right_arg, expr->op());
NarrowLowerType(expr, type);
NarrowLowerType(expr->left(), left_type);
NarrowLowerType(expr->right(), right_type);
diff --git a/src/unicode.h b/src/unicode.h
index 42a8182..f1dcad0 100644
--- a/src/unicode.h
+++ b/src/unicode.h
@@ -29,7 +29,7 @@
#define V8_UNICODE_H_
#include <sys/types.h>
-#include <globals.h>
+#include "globals.h"
/**
* \file
* Definitions and convenience functions for working with unicode.
diff --git a/src/v8-counters.h b/src/v8-counters.h
index ff2247c..c1541b0 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -204,7 +204,6 @@
SC(enum_cache_hits, V8.EnumCacheHits) \
SC(enum_cache_misses, V8.EnumCacheMisses) \
SC(zone_segment_bytes, V8.ZoneSegmentBytes) \
- SC(generic_binary_stub_calls, V8.GenericBinaryStubCalls) \
SC(fast_new_closure_total, V8.FastNewClosureTotal) \
SC(fast_new_closure_try_optimized, V8.FastNewClosureTryOptimized) \
SC(fast_new_closure_install_optimized, V8.FastNewClosureInstallOptimized) \
diff --git a/src/version.cc b/src/version.cc
index 2af317c..9f7a3db 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
#define MINOR_VERSION 22
-#define BUILD_NUMBER 7
+#define BUILD_NUMBER 8
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 382fd10..1bdfda9 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -155,6 +155,18 @@
}
+void BinaryOpStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdx, rax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+}
+
+
static void InitializeArrayConstructorDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor,
@@ -447,35 +459,8 @@
// If the operands are not both numbers, jump to not_numbers.
// Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
// NumberOperands assumes both are smis or heap numbers.
- static void LoadSSE2SmiOperands(MacroAssembler* masm);
static void LoadSSE2UnknownOperands(MacroAssembler* masm,
Label* not_numbers);
-
- // Takes the operands in rdx and rax and loads them as integers in rax
- // and rcx.
- static void LoadAsIntegers(MacroAssembler* masm,
- Label* operand_conversion_failure,
- Register heap_number_map);
-
- // Tries to convert two values to smis losslessly.
- // This fails if either argument is neither a Smi nor a HeapNumber,
- // or if it's a HeapNumber with a value that can't be converted
- // losslessly to a Smi. In that case, control transitions to the
- // on_not_smis label.
- // On success, either control goes to the on_success label (if one is
- // provided), or it falls through at the end of the code (if on_success
- // is NULL).
- // On success, both first and second holds Smi tagged values.
- // One of first or second must be non-Smi when entering.
- static void NumbersToSmis(MacroAssembler* masm,
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* on_success,
- Label* on_not_smis,
- ConvertUndefined convert_undefined);
};
@@ -563,569 +548,6 @@
}
-void BinaryOpStub::Initialize() {}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ PopReturnAddressTo(rcx);
- __ push(rdx);
- __ push(rax);
- // Left and right arguments are now on top.
- __ Push(Smi::FromInt(MinorKey()));
-
- __ PushReturnAddressFrom(rcx);
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-static void BinaryOpStub_GenerateSmiCode(
- MacroAssembler* masm,
- Label* slow,
- BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
- Token::Value op) {
-
- // Arguments to BinaryOpStub are in rdx and rax.
- const Register left = rdx;
- const Register right = rax;
-
- // We only generate heap number answers for overflowing calculations
- // for the four basic arithmetic operations and logical right shift by 0.
- bool generate_inline_heapnumber_results =
- (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) &&
- (op == Token::ADD || op == Token::SUB ||
- op == Token::MUL || op == Token::DIV || op == Token::SHR);
-
- // Smi check of both operands. If op is BIT_OR, the check is delayed
- // until after the OR operation.
- Label not_smis;
- Label use_fp_on_smis;
- Label fail;
-
- if (op != Token::BIT_OR) {
- Comment smi_check_comment(masm, "-- Smi check arguments");
- __ JumpIfNotBothSmi(left, right, &not_smis);
- }
-
- Label smi_values;
- __ bind(&smi_values);
- // Perform the operation.
- Comment perform_smi(masm, "-- Perform smi operation");
- switch (op) {
- case Token::ADD:
- ASSERT(right.is(rax));
- __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
- break;
-
- case Token::SUB:
- __ SmiSub(left, left, right, &use_fp_on_smis);
- __ movq(rax, left);
- break;
-
- case Token::MUL:
- ASSERT(right.is(rax));
- __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
- break;
-
- case Token::DIV:
- // SmiDiv will not accept left in rdx or right in rax.
- __ movq(rbx, rax);
- __ movq(rcx, rdx);
- __ SmiDiv(rax, rcx, rbx, &use_fp_on_smis);
- break;
-
- case Token::MOD:
- // SmiMod will not accept left in rdx or right in rax.
- __ movq(rbx, rax);
- __ movq(rcx, rdx);
- __ SmiMod(rax, rcx, rbx, &use_fp_on_smis);
- break;
-
- case Token::BIT_OR: {
- ASSERT(right.is(rax));
- __ SmiOrIfSmis(right, right, left, &not_smis); // BIT_OR is commutative.
- break;
- }
- case Token::BIT_XOR:
- ASSERT(right.is(rax));
- __ SmiXor(right, right, left); // BIT_XOR is commutative.
- break;
-
- case Token::BIT_AND:
- ASSERT(right.is(rax));
- __ SmiAnd(right, right, left); // BIT_AND is commutative.
- break;
-
- case Token::SHL:
- __ SmiShiftLeft(left, left, right);
- __ movq(rax, left);
- break;
-
- case Token::SAR:
- __ SmiShiftArithmeticRight(left, left, right);
- __ movq(rax, left);
- break;
-
- case Token::SHR:
- __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis);
- __ movq(rax, left);
- break;
-
- default:
- UNREACHABLE();
- }
-
- // 5. Emit return of result in rax. Some operations have registers pushed.
- __ ret(0);
-
- if (use_fp_on_smis.is_linked()) {
- // 6. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- __ bind(&use_fp_on_smis);
- if (op == Token::DIV || op == Token::MOD) {
- // Restore left and right to rdx and rax.
- __ movq(rdx, rcx);
- __ movq(rax, rbx);
- }
-
- if (generate_inline_heapnumber_results) {
- __ AllocateHeapNumber(rcx, rbx, slow);
- Comment perform_float(masm, "-- Perform float operation on smis");
- if (op == Token::SHR) {
- __ SmiToInteger32(left, left);
- __ cvtqsi2sd(xmm0, left);
- } else {
- FloatingPointHelper::LoadSSE2SmiOperands(masm);
- switch (op) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- }
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rcx);
- __ ret(0);
- } else {
- __ jmp(&fail);
- }
- }
-
- // 7. Non-smi operands reach the end of the code generated by
- // GenerateSmiCode, and fall through to subsequent code,
- // with the operands in rdx and rax.
- // But first we check if non-smi values are HeapNumbers holding
- // values that could be smis.
- __ bind(&not_smis);
- Comment done_comment(masm, "-- Enter non-smi code");
- FloatingPointHelper::ConvertUndefined convert_undefined =
- FloatingPointHelper::BAILOUT_ON_UNDEFINED;
- // This list must be in sync with BinaryOpPatch() behavior in ic.cc.
- if (op == Token::BIT_AND ||
- op == Token::BIT_OR ||
- op == Token::BIT_XOR ||
- op == Token::SAR ||
- op == Token::SHL ||
- op == Token::SHR) {
- convert_undefined = FloatingPointHelper::CONVERT_UNDEFINED_TO_ZERO;
- }
- FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx,
- &smi_values, &fail, convert_undefined);
- __ jmp(&smi_values);
- __ bind(&fail);
-}
-
-
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure,
- OverwriteMode mode);
-
-
-static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm,
- Label* allocation_failure,
- Label* non_numeric_failure,
- Token::Value op,
- OverwriteMode mode) {
- switch (op) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);
-
- switch (op) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, allocation_failure, mode);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- break;
- }
- case Token::MOD: {
- // For MOD we jump to the allocation_failure label, to call runtime.
- __ jmp(allocation_failure);
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label non_smi_shr_result;
- Register heap_number_map = r9;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
- heap_number_map);
- switch (op) {
- case Token::BIT_OR: __ orl(rax, rcx); break;
- case Token::BIT_AND: __ andl(rax, rcx); break;
- case Token::BIT_XOR: __ xorl(rax, rcx); break;
- case Token::SAR: __ sarl_cl(rax); break;
- case Token::SHL: __ shll_cl(rax); break;
- case Token::SHR: {
- __ shrl_cl(rax);
- // Check if result is negative. This can only happen for a shift
- // by zero.
- __ testl(rax, rax);
- __ j(negative, &non_smi_shr_result);
- break;
- }
- default: UNREACHABLE();
- }
- STATIC_ASSERT(kSmiValueSize == 32);
- // Tag smi result and return.
- __ Integer32ToSmi(rax, rax);
- __ Ret();
-
- // Logical shift right can produce an unsigned int32 that is not
- // an int32, and so is not in the smi range. Allocate a heap number
- // in that case.
- if (op == Token::SHR) {
- __ bind(&non_smi_shr_result);
- Label allocation_failed;
- __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
- // Allocate heap number in new space.
- // Not using AllocateHeapNumber macro in order to reuse
- // already loaded heap_number_map.
- __ Allocate(HeapNumber::kSize, rax, rdx, no_reg, &allocation_failed,
- TAG_OBJECT);
- // Set the map.
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- kHeapNumberMapRegisterClobbered);
- __ movq(FieldOperand(rax, HeapObject::kMapOffset),
- heap_number_map);
- __ cvtqsi2sd(xmm0, rbx);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- __ Ret();
-
- __ bind(&allocation_failed);
- // We need tagged values in rdx and rax for the following code,
- // not int32 in rax and rcx.
- __ Integer32ToSmi(rax, rcx);
- __ Integer32ToSmi(rdx, rbx);
- __ jmp(allocation_failure);
- }
- break;
- }
- default: UNREACHABLE(); break;
- }
- // No fall-through from this generated code.
- if (FLAG_debug_code) {
- __ Abort(kUnexpectedFallThroughInBinaryStubGenerateFloatingPointCode);
- }
-}
-
-
-static void BinaryOpStub_GenerateRegisterArgsPushUnderReturn(
- MacroAssembler* masm) {
- // Push arguments, but ensure they are under the return address
- // for a tail call.
- __ PopReturnAddressTo(rcx);
- __ push(rdx);
- __ push(rax);
- __ PushReturnAddressFrom(rcx);
-}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- Label left_not_string, call_runtime;
-
- // Registers containing left and right operands respectively.
- Register left = rdx;
- Register right = rax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &left_not_string, Label::kNear);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &left_not_string, Label::kNear);
- StringAddStub string_add_left_stub(
- (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
- BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime, Label::kNear);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- StringAddStub string_add_right_stub(
- (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
- BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // Neither argument is a string.
- __ bind(&call_runtime);
-}
-
-
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label right_arg_changed, call_runtime;
-
- if (op_ == Token::MOD && encoded_right_arg_.has_value) {
- // It is guaranteed that the value will fit into a Smi, because if it
- // didn't, we wouldn't be here; see BinaryOp_Patch.
- __ Cmp(rax, Smi::FromInt(fixed_right_arg_value()));
- __ j(not_equal, &right_arg_changed);
- }
-
- if (result_type_ == BinaryOpIC::UNINITIALIZED ||
- result_type_ == BinaryOpIC::SMI) {
- // Only allow smi results.
- BinaryOpStub_GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS, op_);
- } else {
- // Allow heap number result and don't make a transition if a heap number
- // cannot be allocated.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
- }
-
- // Code falls through if the result is not returned as either a smi or heap
- // number.
- __ bind(&right_arg_changed);
- GenerateTypeTransition(masm);
-
- if (call_runtime.is_linked()) {
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
- }
-}
-
-
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- // The int32 case is identical to the Smi case. We avoid creating this
- // ic state on x64.
- UNREACHABLE();
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // If both arguments are strings, call the string add stub.
- // Otherwise, do a transition.
-
- // Registers containing left and right operands respectively.
- Register left = rdx;
- Register right = rax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &call_runtime);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &call_runtime);
-
- // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &call_runtime);
-
- StringAddStub string_add_stub(
- (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
- BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&call_runtime);
- GenerateTypeTransition(masm);
-}
-
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- Label call_runtime;
-
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateAddStrings(masm);
- }
-
- // Convert oddball arguments to numbers.
- Label check, done;
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &check, Label::kNear);
- if (Token::IsBitOp(op_)) {
- __ xor_(rdx, rdx);
- } else {
- __ LoadRoot(rdx, Heap::kNanValueRootIndex);
- }
- __ jmp(&done, Label::kNear);
- __ bind(&check);
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &done, Label::kNear);
- if (Token::IsBitOp(op_)) {
- __ xor_(rax, rax);
- } else {
- __ LoadRoot(rax, Heap::kNanValueRootIndex);
- }
- __ bind(&done);
-
- GenerateNumberStub(masm);
-}
-
-
-static void BinaryOpStub_CheckSmiInput(MacroAssembler* masm,
- Register input,
- Label* fail) {
- Label ok;
- __ JumpIfSmi(input, &ok, Label::kNear);
- Register heap_number_map = r8;
- Register scratch1 = r9;
- Register scratch2 = r10;
- // HeapNumbers containing 32-bit integer values are also allowed.
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ cmpq(FieldOperand(input, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, fail);
- __ movsd(xmm0, FieldOperand(input, HeapNumber::kValueOffset));
- // Convert, convert back, and compare the two doubles' bits.
- __ cvttsd2siq(scratch2, xmm0);
- __ Cvtlsi2sd(xmm1, scratch2);
- __ movq(scratch1, xmm0);
- __ movq(scratch2, xmm1);
- __ cmpq(scratch1, scratch2);
- __ j(not_equal, fail);
- __ bind(&ok);
-}
-
-
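// [Editorial sketch] The smi-input check above accepts a HeapNumber only if
// its double survives a truncate-to-integer round trip bit-for-bit, which
// rejects fractions, out-of-range values, NaN, and -0.0. Illustrative C++
// only: cvttsd2siq saturates where the cast below would be undefined, so
// this is a sketch, not a drop-in replacement.
#include <stdint.h>
#include <string.h>
static inline bool IsInt32ValuedDouble(double d) {
  double roundtrip =
      static_cast<double>(static_cast<int32_t>(static_cast<int64_t>(d)));
  uint64_t a, b;
  memcpy(&a, &d, sizeof(a));
  memcpy(&b, &roundtrip, sizeof(b));
  return a == b;  // compare the two doubles' bit patterns, as the stub does
}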
-void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- Label gc_required, not_number;
-
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- if (left_type_ == BinaryOpIC::SMI) {
- BinaryOpStub_CheckSmiInput(masm, rdx, &not_number);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- BinaryOpStub_CheckSmiInput(masm, rax, &not_number);
- }
-
- BinaryOpStub_GenerateFloatingPointCode(
- masm, &gc_required, &not_number, op_, mode_);
-
- __ bind(&not_number);
- GenerateTypeTransition(masm);
-
- __ bind(&gc_required);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime;
-
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
-
- BinaryOpStub_GenerateFloatingPointCode(
- masm, &call_runtime, &call_string_add_or_runtime, op_, mode_);
-
- __ bind(&call_string_add_or_runtime);
- if (op_ == Token::ADD) {
- GenerateAddStrings(masm);
- }
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure,
- OverwriteMode mode) {
- Label skip_allocation;
- switch (mode) {
- case OVERWRITE_LEFT: {
- // If the argument in rdx is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(rdx, &skip_allocation);
- // Allocate a heap number for the result. Keep rax and rdx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(rbx, rcx, alloc_failure);
- // Now rdx can be overwritten, losing one of the arguments, as we are
- // done and will not need it any more.
- __ movq(rdx, rbx);
- __ bind(&skip_allocation);
- // Use object in rdx as a result holder
- __ movq(rax, rdx);
- break;
- }
- case OVERWRITE_RIGHT:
- // If the argument in rax is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(rax, &skip_allocation);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep rax and rdx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(rbx, rcx, alloc_failure);
- // Now rax can be overwritten, losing one of the arguments, as we are
- // done and will not need it any more.
- __ movq(rax, rbx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ push(rdx);
- __ push(rax);
-}
-
-
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// TAGGED case:
// Input:
@@ -1432,67 +854,6 @@
}
-// Input: rdx, rax are the left and right objects of a bit op.
-// Output: rax, rcx are left and right integers for a bit op.
-// Jump to conversion_failure: rdx and rax are unchanged.
-void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
- Label* conversion_failure,
- Register heap_number_map) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- __ JumpIfNotSmi(rdx, &arg1_is_object);
- __ SmiToInteger32(r8, rdx);
- __ jmp(&load_arg2);
-
- // If the argument is undefined, it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg1);
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, conversion_failure);
- __ Set(r8, 0);
- __ jmp(&load_arg2);
-
- __ bind(&arg1_is_object);
- __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, &check_undefined_arg1);
- // Get the untagged integer version of the rdx heap number in r8.
- __ TruncateHeapNumberToI(r8, rdx);
-
- // Here r8 has the untagged integer, rax has a Smi or a heap number.
- __ bind(&load_arg2);
- // Test if arg2 is a Smi.
- __ JumpIfNotSmi(rax, &arg2_is_object);
- __ SmiToInteger32(rcx, rax);
- __ jmp(&done);
-
- // If the argument is undefined, it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg2);
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, conversion_failure);
- __ Set(rcx, 0);
- __ jmp(&done);
-
- __ bind(&arg2_is_object);
- __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, &check_undefined_arg2);
- // Get the untagged integer version of the rax heap number in rcx.
- __ TruncateHeapNumberToI(rcx, rax);
-
- __ bind(&done);
- __ movl(rax, r8);
-}
-
-
-void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
- __ SmiToInteger32(kScratchRegister, rdx);
- __ Cvtlsi2sd(xmm0, kScratchRegister);
- __ SmiToInteger32(kScratchRegister, rax);
- __ Cvtlsi2sd(xmm1, kScratchRegister);
-}
-
-
void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
Label* not_numbers) {
Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
@@ -1523,83 +884,6 @@
}
-void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* on_success,
- Label* on_not_smis,
- ConvertUndefined convert_undefined) {
- Register heap_number_map = scratch3;
- Register smi_result = scratch1;
- Label done, maybe_undefined_first, maybe_undefined_second, first_done;
-
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- Label first_smi;
- __ JumpIfSmi(first, &first_smi, Label::kNear);
- __ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal,
- (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
- ? &maybe_undefined_first
- : on_not_smis);
- // Convert HeapNumber to smi if possible.
- __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
- __ movq(scratch2, xmm0);
- __ cvttsd2siq(smi_result, xmm0);
- // Check if conversion was successful by converting back and
- // comparing to the original double's bits.
- __ Cvtlsi2sd(xmm1, smi_result);
- __ movq(kScratchRegister, xmm1);
- __ cmpq(scratch2, kScratchRegister);
- __ j(not_equal, on_not_smis);
- __ Integer32ToSmi(first, smi_result);
-
- __ bind(&first_done);
- __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
- __ bind(&first_smi);
- __ AssertNotSmi(second);
- __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal,
- (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
- ? &maybe_undefined_second
- : on_not_smis);
- // Convert second to smi, if possible.
- __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
- __ movq(scratch2, xmm0);
- __ cvttsd2siq(smi_result, xmm0);
- __ Cvtlsi2sd(xmm1, smi_result);
- __ movq(kScratchRegister, xmm1);
- __ cmpq(scratch2, kScratchRegister);
- __ j(not_equal, on_not_smis);
- __ Integer32ToSmi(second, smi_result);
- if (on_success != NULL) {
- __ jmp(on_success);
- } else {
- __ jmp(&done);
- }
-
- __ bind(&maybe_undefined_first);
- __ CompareRoot(first, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, on_not_smis);
- __ xor_(first, first);
- __ jmp(&first_done);
-
- __ bind(&maybe_undefined_second);
- __ CompareRoot(second, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, on_not_smis);
- __ xor_(second, second);
- if (on_success != NULL) {
- __ jmp(on_success);
- }
- // Else: fall through.
-
- __ bind(&done);
-}
-
-
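The core of the deleted NumbersToSmis is a lossless-round-trip test:
truncate the double to an integer (cvttsd2siq), convert back (Cvtlsi2sd),
and compare raw bit patterns, so a heap number is demoted to a Smi only
when nothing was lost. A hedged C++ approximation that omits V8's Smi range
check; the bit compare also rejects -0.0, whose pattern differs from +0.0:

  #include <cstdint>
  #include <cstring>

  bool DoubleIsExactInt64(double d, int64_t* out) {
    // Keep the cast defined in C++; cvttsd2siq instead returns a sentinel
    // for out-of-range inputs, which the comparison below rejects anyway.
    if (!(d > -9.2e18 && d < 9.2e18)) return false;  // also filters NaN
    int64_t i = static_cast<int64_t>(d);   // truncate toward zero
    double back = static_cast<double>(i);  // convert back
    uint64_t a, b;
    std::memcpy(&a, &d, sizeof(a));
    std::memcpy(&b, &back, sizeof(b));
    *out = i;
    return a == b;  // exact round trip: a true integer, and not -0.0
  }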
void MathPowStub::Generate(MacroAssembler* masm) {
const Register exponent = rdx;
const Register base = rax;
@@ -3467,6 +2751,7 @@
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ BinaryOpStub::GenerateAheadOfTime(isolate);
}
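The one-line addition enrolls BinaryOpStub in the same ahead-of-time pass as
the surrounding stubs, so its code objects exist before any code path that
must not allocate. Schematically only (the state table below is a
hypothetical placeholder, not the stub's real interface):

  void BinaryOpStub::GenerateAheadOfTime(Isolate* isolate) {
    for (int i = 0; i < kNumBinaryOpStates; i++) {  // hypothetical table
      BinaryOpStub stub(kBinaryOpStates[i]);
      stub.GetCode(isolate);  // compile now; the code object is cached
    }
  }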
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 0e98746..495ca74 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -89,9 +89,7 @@
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- if (FLAG_weak_embedded_maps_in_optimized_code) {
- RegisterDependentCodeForEmbeddedMaps(code);
- }
+ RegisterDependentCodeForEmbeddedMaps(code);
PopulateDeoptimizationData(code);
info()->CommitDependencies(code);
}
@@ -707,26 +705,31 @@
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
ZoneList<Handle<Map> > maps(1, zone());
+ ZoneList<Handle<JSObject> > objects(1, zone());
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT &&
- it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- if (map->CanTransition()) {
+ if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
+ if (it.rinfo()->target_object()->IsMap()) {
+ Handle<Map> map(Map::cast(it.rinfo()->target_object()));
maps.Add(map, zone());
+ } else if (it.rinfo()->target_object()->IsJSObject()) {
+ Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
+ objects.Add(object, zone());
}
}
}
#ifdef VERIFY_HEAP
- // This disables verification of weak embedded maps after full GC.
+ // This disables verification of weak embedded objects after full GC.
// AddDependentCode can cause a GC, which would observe the state where
// this code is not yet in the dependent code lists of the embedded maps.
- NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
+ NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
#endif
for (int i = 0; i < maps.length(); i++) {
maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
}
+ for (int i = 0; i < objects.length(); i++) {
+ AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
+ }
}
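Net effect of this lithium-codegen hunk: weak treatment widens from maps
alone to any target Code::IsWeakEmbeddedObject accepts. Weak maps still
register on their own dependent-code lists, while other JSObjects are
recorded through the heap's weak-object-to-code table via
AddWeakObjectToCodeDependency, with the verification scope renamed to
match. The FLAG_weak_embedded_maps_in_optimized_code guard disappears
because, presumably, the per-object predicate now makes that decision.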
diff --git a/test/cctest/test-macro-assembler-x64.cc b/test/cctest/test-macro-assembler-x64.cc
index fe22691..d9b6e3f 100644
--- a/test/cctest/test-macro-assembler-x64.cc
+++ b/test/cctest/test-macro-assembler-x64.cc
@@ -141,8 +141,8 @@
static void TestMoveSmi(MacroAssembler* masm, Label* exit, int id, Smi* value) {
__ movl(rax, Immediate(id));
- __ Move(rcx, Smi::FromInt(0));
- __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(0)));
+ __ Move(rcx, value);
+ __ Set(rdx, reinterpret_cast<intptr_t>(value));
__ cmpq(rcx, rdx);
__ j(not_equal, exit);
}
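A note on this test fix: the old body moved Smi::FromInt(0) into both rcx
and rdx, so cmpq compared two identical constants and the exit branch could
never be taken; TestMoveSmi now actually exercises Move and Set with the
Smi each caller passes in.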
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 846472d..bb9abc9 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -58,6 +58,7 @@
['component=="shared_library"', {
'type': '<(component)',
'sources': [
+ '../../src/defaults.cc',
# Note: on non-Windows we still build this file so that gyp
# has some sources to link into the component.
'../../src/v8dll-main.cc',
@@ -270,7 +271,6 @@
'../../src/debug-agent.h',
'../../src/debug.cc',
'../../src/debug.h',
- '../../src/defaults.cc',
'../../src/deoptimizer.cc',
'../../src/deoptimizer.h',
'../../src/disasm.h',
@@ -854,6 +854,10 @@
'BUILDING_V8_SHARED',
'V8_SHARED',
],
+ }, {
+ 'sources': [
+ '../../src/defaults.cc',
+ ],
}],
['v8_postmortem_support=="true"', {
'sources': [