Version 3.2.3
Fixed a number of crash bugs.
Fixed Array::New(length) to return an array with a length (issue 1256).
Fixed FreeBSD build.
Changed __defineGetter__ to not throw (matching the behavior of Safari).
Implemented more of EcmaScript 5 strict mode.
Improved Crankshaft performance on all platforms.
git-svn-id: http://v8.googlecode.com/svn/trunk@7219 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 6652df2..10364eb 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -299,7 +299,8 @@
byte* Assembler::spare_buffer_ = NULL;
Assembler::Assembler(void* buffer, int buffer_size)
- : positions_recorder_(this) {
+ : positions_recorder_(this),
+ emit_debug_code_(FLAG_debug_code) {
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
@@ -2761,7 +2762,7 @@
Serializer::TooLateToEnableNow();
}
#endif
- if (!Serializer::enabled() && !FLAG_debug_code) {
+ if (!Serializer::enabled() && !emit_debug_code()) {
return;
}
}
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index b60157c..7d9e374 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -522,6 +522,9 @@
Assembler(void* buffer, int buffer_size);
~Assembler();
+ // Overrides the default provided by FLAG_debug_code.
+ void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
@@ -982,6 +985,8 @@
static const int kMinimalBufferSize = 4*KB;
protected:
+ bool emit_debug_code() const { return emit_debug_code_; }
+
void movsd(XMMRegister dst, const Operand& src);
void movsd(const Operand& dst, XMMRegister src);
@@ -1057,6 +1062,8 @@
PositionsRecorder positions_recorder_;
+ bool emit_debug_code_;
+
friend class PositionsRecorder;
};
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 6d7dfe6..01ea60c 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -5509,8 +5509,8 @@
STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
// Handle exceptionally long strings in the runtime system.
__ j(overflow, &string_add_runtime);
- // Use the runtime system when adding two one character strings, as it
- // contains optimizations for this specific case using the symbol table.
+ // Use the symbol table when adding two one character strings, as it
+ // helps later optimizations to return a symbol here.
__ cmp(Operand(ebx), Immediate(Smi::FromInt(2)));
__ j(not_equal, &longer_than_two);
@@ -5927,6 +5927,8 @@
// If entry is undefined no string with this hash can be found.
__ cmp(candidate, Factory::undefined_value());
__ j(equal, not_found);
+ __ cmp(candidate, Factory::null_value());
+ __ j(equal, &next_probe[i]);
// If length is not 2 the string is not a candidate.
__ cmp(FieldOperand(candidate, String::kLengthOffset),
@@ -6295,59 +6297,6 @@
}
-void StringCharAtStub::Generate(MacroAssembler* masm) {
- // Expects two arguments (object, index) on the stack:
-
- // Stack frame on entry.
- // esp[0]: return address
- // esp[4]: index
- // esp[8]: object
-
- Register object = ebx;
- Register index = eax;
- Register scratch1 = ecx;
- Register scratch2 = edx;
- Register result = eax;
-
- __ pop(scratch1); // Return address.
- __ pop(index);
- __ pop(object);
- __ push(scratch1);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharAtGenerator generator(object,
- index,
- scratch1,
- scratch2,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ Set(result, Immediate(Factory::empty_string()));
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ Set(result, Immediate(Smi::FromInt(0)));
- __ jmp(&done);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm, call_helper);
-
- __ bind(&done);
- __ ret(0);
-}
-
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::SMIS);
NearLabel miss;
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index fcb06d2..f1d4cac 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -9943,12 +9943,6 @@
__ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, tmp.reg());
deferred->Branch(not_equal);
- // Check that the key is within bounds. Both the key and the length of
- // the JSArray are smis. Use unsigned comparison to handle negative keys.
- __ cmp(key.reg(),
- FieldOperand(receiver.reg(), JSArray::kLengthOffset));
- deferred->Branch(above_equal);
-
// Get the elements array from the receiver and check that it is not a
// dictionary.
__ mov(tmp.reg(),
@@ -9974,6 +9968,14 @@
Immediate(Factory::fixed_array_map()));
deferred->Branch(not_equal);
+ // Check that the key is within bounds. Both the key and the length of
+ // the JSArray are smis (because the fixed array check above ensures the
+ // elements are in fast case). Use unsigned comparison to handle negative
+ // keys.
+ __ cmp(key.reg(),
+ FieldOperand(receiver.reg(), JSArray::kLengthOffset));
+ deferred->Branch(above_equal);
+
// Store the value.
__ mov(FixedArrayElementOperand(tmp.reg(), key.reg()), result.reg());
__ IncrementCounter(&Counters::keyed_store_inline, 1);
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index e0cbe35..a7d38ce 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -331,6 +331,7 @@
int PrintRightOperandHelper(byte* modrmp, RegisterNameMapping register_name);
int PrintRightOperand(byte* modrmp);
int PrintRightByteOperand(byte* modrmp);
+ int PrintRightXMMOperand(byte* modrmp);
int PrintOperands(const char* mnem, OperandOrder op_order, byte* data);
int PrintImmediateOp(byte* data);
int F7Instruction(byte* data);
@@ -367,9 +368,11 @@
int DisassemblerIA32::PrintRightOperandHelper(
byte* modrmp,
- RegisterNameMapping register_name) {
+ RegisterNameMapping direct_register_name) {
int mod, regop, rm;
get_modrm(*modrmp, &mod, &regop, &rm);
+ RegisterNameMapping register_name = (mod == 3) ? direct_register_name :
+ &DisassemblerIA32::NameOfCPURegister;
switch (mod) {
case 0:
if (rm == ebp) {
@@ -454,6 +457,12 @@
}
+int DisassemblerIA32::PrintRightXMMOperand(byte* modrmp) {
+ return PrintRightOperandHelper(modrmp,
+ &DisassemblerIA32::NameOfXMMRegister);
+}
+
+
// Returns number of bytes used including the current *data.
// Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
int DisassemblerIA32::PrintOperands(const char* mnem,
@@ -937,7 +946,7 @@
get_modrm(*data, &mod, &regop, &rm);
if (regop == eax) {
AppendToBuffer("test_b ");
- data += PrintRightOperand(data);
+ data += PrintRightByteOperand(data);
int32_t imm = *data;
AppendToBuffer(",0x%x", imm);
data++;
@@ -1035,11 +1044,19 @@
case 0xC6: // imm8
{ bool is_byte = *data == 0xC6;
data++;
- AppendToBuffer("%s ", is_byte ? "mov_b" : "mov");
- data += PrintRightOperand(data);
- int32_t imm = is_byte ? *data : *reinterpret_cast<int32_t*>(data);
- AppendToBuffer(",0x%x", imm);
- data += is_byte ? 1 : 4;
+ if (is_byte) {
+ AppendToBuffer("%s ", "mov_b");
+ data += PrintRightByteOperand(data);
+ int32_t imm = *data;
+ AppendToBuffer(",0x%x", imm);
+ data++;
+ } else {
+ AppendToBuffer("%s ", "mov");
+ data += PrintRightOperand(data);
+ int32_t imm = *reinterpret_cast<int32_t*>(data);
+ AppendToBuffer(",0x%x", imm);
+ data += 4;
+ }
}
break;
@@ -1054,7 +1071,7 @@
default: UnimplementedInstruction();
}
AppendToBuffer("%s ", mnem);
- data += PrintRightOperand(data);
+ data += PrintRightByteOperand(data);
int32_t imm = *data;
AppendToBuffer(",0x%x", imm);
data++;
@@ -1067,9 +1084,15 @@
int mod, regop, rm;
data++;
get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("%s ", is_byte ? "mov_b" : "mov");
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfCPURegister(regop));
+ if (is_byte) {
+ AppendToBuffer("%s ", "mov_b");
+ data += PrintRightByteOperand(data);
+ AppendToBuffer(",%s", NameOfByteCPURegister(regop));
+ } else {
+ AppendToBuffer("%s ", "mov");
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfCPURegister(regop));
+ }
}
break;
@@ -1181,7 +1204,7 @@
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("movdqa %s,", NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
+ data += PrintRightXMMOperand(data);
} else if (*data == 0x70) {
data++;
int mod, regop, rm;
@@ -1224,7 +1247,7 @@
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightOperand(data);
+ data += PrintRightXMMOperand(data);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (*data == 0x7E) {
data++;
@@ -1242,12 +1265,16 @@
NameOfXMMRegister(rm));
data++;
} else if (*data == 0xE7) {
- AppendToBuffer("movntdq ");
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ if (mod == 3) {
+ AppendToBuffer("movntdq ");
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else {
+ UnimplementedInstruction();
+ }
} else if (*data == 0xEF) {
data++;
int mod, regop, rm;
@@ -1338,14 +1365,14 @@
data += 3;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightOperand(data);
+ data += PrintRightXMMOperand(data);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (b2 == 0x10) {
data += 3;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("movsd %s,", NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
+ data += PrintRightXMMOperand(data);
} else {
const char* mnem = "?";
switch (b2) {
@@ -1361,27 +1388,11 @@
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
if (b2 == 0x2A) {
- if (mod != 0x3) {
- AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
- } else {
- AppendToBuffer("%s %s,%s",
- mnem,
- NameOfXMMRegister(regop),
- NameOfCPURegister(rm));
- data++;
- }
+ AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
+ data += PrintRightOperand(data);
} else if (b2 == 0x2C) {
- if (mod != 0x3) {
- AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop));
- data += PrintRightOperand(data);
- } else {
- AppendToBuffer("%s %s,%s",
- mnem,
- NameOfCPURegister(regop),
- NameOfXMMRegister(rm));
- data++;
- }
+ AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop));
+ data += PrintRightXMMOperand(data);
} else if (b2 == 0xC2) {
// Intel manual 2A, Table 3-18.
const char* const pseudo_op[] = {
@@ -1400,16 +1411,8 @@
NameOfXMMRegister(rm));
data += 2;
} else {
- if (mod != 0x3) {
- AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
- } else {
- AppendToBuffer("%s %s,%s",
- mnem,
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- }
+ AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
}
}
} else {
@@ -1421,27 +1424,28 @@
if (*(data+1) == 0x0F) {
if (*(data+2) == 0x2C) {
data += 3;
- data += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, data);
+ int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("cvttss2si %s,", NameOfCPURegister(regop));
+ data += PrintRightXMMOperand(data);
} else if (*(data+2) == 0x5A) {
data += 3;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("cvtss2sd %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
+ AppendToBuffer("cvtss2sd %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
} else if (*(data+2) == 0x6F) {
data += 3;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("movdqu %s,", NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
+ data += PrintRightXMMOperand(data);
} else if (*(data+2) == 0x7F) {
AppendToBuffer("movdqu ");
data += 3;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightOperand(data);
+ data += PrintRightXMMOperand(data);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else {
UnimplementedInstruction();
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 174a946..c8f5fdf 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -727,9 +727,9 @@
prop->key()->AsLiteral()->handle()->IsSmi());
__ Set(ecx, Immediate(prop->key()->AsLiteral()->handle()));
- Handle<Code> ic(Builtins::builtin(is_strict()
- ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ is_strict_mode() ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
}
@@ -1371,8 +1371,8 @@
__ mov(ecx, Immediate(key->handle()));
__ mov(edx, Operand(esp, 0));
Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
+ is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
@@ -1543,27 +1543,26 @@
}
}
+ // For compound assignments we need another deoptimization point after the
+ // variable/property load.
if (expr->is_compound()) {
{ AccumulatorValueContext context(this);
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+ PrepareForBailout(expr->target(), TOS_REG);
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
+ PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
+ PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
break;
}
}
- // For property compound assignments we need another deoptimization
- // point after the property load.
- if (property != NULL) {
- PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
- }
-
Token::Value op = expr->binary_op();
__ push(eax); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
@@ -1763,8 +1762,8 @@
__ pop(eax); // Restore value.
__ mov(ecx, prop->key()->AsLiteral()->handle());
Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
+ is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
break;
}
@@ -1786,8 +1785,8 @@
}
__ pop(eax); // Restore value.
Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
+ is_strict_mode() ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
break;
}
@@ -1812,8 +1811,8 @@
__ mov(ecx, var->name());
__ mov(edx, GlobalObjectOperand());
Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
+ is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
@@ -1915,8 +1914,8 @@
__ pop(edx);
}
Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
+ is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
// If the assignment ends an initialization block, revert to fast case.
@@ -1955,8 +1954,8 @@
// Record source code position before IC call.
SetSourcePosition(expr->position());
Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
+ is_strict_mode() ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
// If the assignment ends an initialization block, revert to fast case.
@@ -3049,8 +3048,8 @@
// Fetch the map and check if array is in fast case.
// Check that object doesn't require security checks and
// has no indexed interceptor.
- __ CmpObjectType(object, FIRST_JS_OBJECT_TYPE, temp);
- __ j(below, &slow_case);
+ __ CmpObjectType(object, JS_ARRAY_TYPE, temp);
+ __ j(not_equal, &slow_case);
__ test_b(FieldOperand(temp, Map::kBitFieldOffset),
KeyedLoadIC::kSlowCaseBitFieldMask);
__ j(not_zero, &slow_case);
@@ -3748,7 +3747,11 @@
// We need a second deoptimization point after loading the value
// in case evaluating the property load my have a side effect.
- PrepareForBailout(expr->increment(), TOS_REG);
+ if (assign_type == VARIABLE) {
+ PrepareForBailout(expr->expression(), TOS_REG);
+ } else {
+ PrepareForBailout(expr->increment(), TOS_REG);
+ }
// Call ToNumber only if operand is not a smi.
NearLabel no_conversion;
@@ -3842,8 +3845,8 @@
__ mov(ecx, prop->key()->AsLiteral()->handle());
__ pop(edx);
Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
+ is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -3859,8 +3862,8 @@
__ pop(ecx);
__ pop(edx);
Handle<Code> ic(Builtins::builtin(
- is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
- : Builtins::KeyedStoreIC_Initialize));
+ is_strict_mode() ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 269591e..78784b2 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -743,11 +743,6 @@
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::StringCharAt: {
- StringCharAtStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::NumberToString: {
NumberToStringStub stub;
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -898,7 +893,49 @@
}
if (right->IsConstantOperand()) {
- __ imul(left, left, ToInteger32(LConstantOperand::cast(right)));
+ // Try strength reductions on the multiplication.
+ // All replacement instructions are at most as long as the imul
+ // and have better latency.
+ int constant = ToInteger32(LConstantOperand::cast(right));
+ if (constant == -1) {
+ __ neg(left);
+ } else if (constant == 0) {
+ __ xor_(left, Operand(left));
+ } else if (constant == 2) {
+ __ add(left, Operand(left));
+ } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ // If we know that the multiplication can't overflow, it's safe to
+ // use instructions that don't set the overflow flag for the
+ // multiplication.
+ switch (constant) {
+ case 1:
+ // Do nothing.
+ break;
+ case 3:
+ __ lea(left, Operand(left, left, times_2, 0));
+ break;
+ case 4:
+ __ shl(left, 2);
+ break;
+ case 5:
+ __ lea(left, Operand(left, left, times_4, 0));
+ break;
+ case 8:
+ __ shl(left, 3);
+ break;
+ case 9:
+ __ lea(left, Operand(left, left, times_8, 0));
+ break;
+ case 16:
+ __ shl(left, 4);
+ break;
+ default:
+ __ imul(left, left, constant);
+ break;
+ }
+ } else {
+ __ imul(left, left, constant);
+ }
} else {
__ imul(left, ToOperand(right));
}
@@ -2026,7 +2063,7 @@
}
-void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
__ mov(result, Operand::Cell(instr->hydrogen()->cell()));
if (instr->hydrogen()->check_hole_value()) {
@@ -2036,6 +2073,19 @@
}
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->global_object()).is(eax));
+ ASSERT(ToRegister(instr->result()).is(eax));
+
+ __ mov(ecx, instr->name());
+ RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
+ RelocInfo::CODE_TARGET_CONTEXT;
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ CallCode(ic, mode, instr);
+}
+
+
void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
Register value = ToRegister(instr->InputAt(0));
Operand cell_operand = Operand::Cell(instr->hydrogen()->cell());
@@ -3502,9 +3552,15 @@
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister());
__ test(ToRegister(input), Immediate(kSmiTagMask));
- DeoptimizeIf(instr->condition(), instr->environment());
+ DeoptimizeIf(not_zero, instr->environment());
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+ LOperand* input = instr->InputAt(0);
+ __ test(ToRegister(input), Immediate(kSmiTagMask));
+ DeoptimizeIf(zero, instr->environment());
}
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 3148ae2..5b83e1e 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -1683,7 +1683,7 @@
LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new LCheckSmi(value, zero));
+ return AssignEnvironment(new LCheckNonSmi(value));
}
@@ -1704,7 +1704,7 @@
LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new LCheckSmi(value, not_zero));
+ return AssignEnvironment(new LCheckSmi(value));
}
@@ -1745,14 +1745,22 @@
}
-LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
- LLoadGlobal* result = new LLoadGlobal;
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+ LLoadGlobalCell* result = new LLoadGlobalCell;
return instr->check_hole_value()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
}
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* global_object = UseFixed(instr->global_object(), eax);
+ LLoadGlobalGeneric* result = new LLoadGlobalGeneric(context, global_object);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
LStoreGlobal* result = new LStoreGlobal(UseRegisterAtStart(instr->value()));
return instr->check_hole_value() ? AssignEnvironment(result) : result;
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 9a07c6f..c281d64 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -70,6 +70,7 @@
V(CheckFunction) \
V(CheckInstanceType) \
V(CheckMap) \
+ V(CheckNonSmi) \
V(CheckPrototypeMaps) \
V(CheckSmi) \
V(ClassOfTest) \
@@ -120,7 +121,8 @@
V(LoadElements) \
V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
- V(LoadGlobal) \
+ V(LoadGlobalCell) \
+ V(LoadGlobalGeneric) \
V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
@@ -1270,10 +1272,27 @@
};
-class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
public:
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
+};
+
+
+class LLoadGlobalGeneric: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+ inputs_[0] = context;
+ inputs_[1] = global_object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* global_object() { return inputs_[1]; }
+ Handle<Object> name() const { return hydrogen()->name(); }
+ bool for_typeof() const { return hydrogen()->for_typeof(); }
};
@@ -1792,20 +1811,21 @@
class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
public:
- LCheckSmi(LOperand* value, Condition condition)
- : condition_(condition) {
+ explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
}
- Condition condition() const { return condition_; }
+ DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const {
- return (condition_ == zero) ? "check-non-smi" : "check-smi";
+
+class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckNonSmi(LOperand* value) {
+ inputs_[0] = value;
}
- private:
- Condition condition_;
+ DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
};
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 2ba95c4..d5c3f53 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -52,7 +52,7 @@
void MacroAssembler::RecordWriteHelper(Register object,
Register addr,
Register scratch) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Check that the object is not in new space.
Label not_in_new_space;
InNewSpace(object, scratch, not_equal, &not_in_new_space);
@@ -113,7 +113,7 @@
// Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
mov(object, Immediate(BitCast<int32_t>(kZapValue)));
mov(value, Immediate(BitCast<int32_t>(kZapValue)));
mov(scratch, Immediate(BitCast<int32_t>(kZapValue)));
@@ -141,7 +141,7 @@
// Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
mov(object, Immediate(BitCast<int32_t>(kZapValue)));
mov(address, Immediate(BitCast<int32_t>(kZapValue)));
mov(value, Immediate(BitCast<int32_t>(kZapValue)));
@@ -285,7 +285,7 @@
push(esi);
push(Immediate(Smi::FromInt(type)));
push(Immediate(CodeObject()));
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
cmp(Operand(esp, 0), Immediate(Factory::undefined_value()));
Check(not_equal, "code object not properly patched");
}
@@ -293,7 +293,7 @@
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
Immediate(Smi::FromInt(type)));
Check(equal, "stack frame types must match");
@@ -550,7 +550,7 @@
mov(scratch, Operand(ebp, StandardFrameConstants::kContextOffset));
// When generating debug code, make sure the lexical context is set.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
cmp(Operand(scratch), Immediate(0));
Check(not_equal, "we should not have an empty lexical context");
}
@@ -560,7 +560,7 @@
mov(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
// Check the context is a global context.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
push(scratch);
// Read the first word and compare to global_context_map.
mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
@@ -584,7 +584,7 @@
mov(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
// Check the context is a global context.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
cmp(holder_reg, Factory::null_value());
Check(not_equal, "JSGlobalProxy::context() should not be null.");
@@ -637,7 +637,7 @@
void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
Register scratch) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
test(result_end, Immediate(kObjectAlignmentMask));
Check(zero, "Unaligned allocation in new space");
}
@@ -661,7 +661,7 @@
Label* gc_required,
AllocationFlags flags) {
if (!FLAG_inline_new) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
mov(result, Immediate(0x7091));
if (result_end.is_valid()) {
@@ -718,7 +718,7 @@
Label* gc_required,
AllocationFlags flags) {
if (!FLAG_inline_new) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
mov(result, Immediate(0x7091));
mov(result_end, Immediate(0x7191));
@@ -764,7 +764,7 @@
Label* gc_required,
AllocationFlags flags) {
if (!FLAG_inline_new) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
mov(result, Immediate(0x7091));
mov(result_end, Immediate(0x7191));
@@ -1320,7 +1320,7 @@
// pointer to out cell.
lea(scratch, Operand(esp, (argc + 1) * kPointerSize));
mov(Operand(esp, 0 * kPointerSize), scratch); // output.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
mov(Operand(esp, (argc + 1) * kPointerSize), Immediate(0)); // out cell.
}
}
@@ -1621,7 +1621,7 @@
// (i.e., the static scope chain and runtime context chain do not agree).
// A variable occurring in such a scope should have slot type LOOKUP and
// not CONTEXT.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
cmp(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
Check(equal, "Yo dawg, I heard you liked function contexts "
"so I put function contexts in all your contexts");
@@ -1643,7 +1643,7 @@
Register map) {
// Load the initial map. The global functions all have initial maps.
mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Label ok, fail;
CheckMap(map, Factory::meta_map(), &fail, false);
jmp(&ok);
@@ -1787,12 +1787,12 @@
void MacroAssembler::Assert(Condition cc, const char* msg) {
- if (FLAG_debug_code) Check(cc, msg);
+ if (emit_debug_code()) Check(cc, msg);
}
void MacroAssembler::AssertFastElements(Register elements) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Label ok;
cmp(FieldOperand(elements, HeapObject::kMapOffset),
Immediate(Factory::fixed_array_map()));
@@ -1860,7 +1860,7 @@
void MacroAssembler::JumpIfNotNumber(Register reg,
TypeInfo info,
Label* on_not_number) {
- if (FLAG_debug_code) AbortIfSmi(reg);
+ if (emit_debug_code()) AbortIfSmi(reg);
if (!info.IsNumber()) {
cmp(FieldOperand(reg, HeapObject::kMapOffset),
Factory::heap_number_map());
@@ -1874,7 +1874,7 @@
Register scratch,
TypeInfo info,
Label* on_not_int32) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
AbortIfSmi(source);
AbortIfNotNumber(source);
}
@@ -1994,7 +1994,7 @@
void MacroAssembler::CallCFunction(Register function,
int num_arguments) {
// Check stack alignment.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
CheckStackAlignment();
}