Version 3.4.6
Lowered limit on code space for systems with low memory supply.
Allowed compiling v8_shell with the 'host' toolset (issue 82437).
Extended setBreakpoint API to accept partial script name (issue 1418).
Made multi-line comments not count when deciding whether the '-->'
comment starter is first on a line. This matches Safari.
Made handling of non-array receivers in Array length setter correct (issue 1491).
Added ability to heap profiler to iterate over snapshot's nodes (issue 1481).
Review URL: http://codereview.chromium.org/7232010
git-svn-id: http://v8.googlecode.com/svn/trunk@8386 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/accessors.cc b/src/accessors.cc
index d8df05e..806c679 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -102,6 +102,15 @@
MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
Isolate* isolate = object->GetIsolate();
+
+ // This means one of the object's prototypes is a JSArray and the
+ // object does not have a 'length' property. Calling SetProperty
+ // causes an infinite loop.
+ if (!object->IsJSArray()) {
+ return object->SetLocalPropertyIgnoreAttributes(
+ isolate->heap()->length_symbol(), value, NONE);
+ }
+
value = FlattenNumber(value);
// Need to call methods that may trigger GC.
@@ -117,20 +126,8 @@
Handle<Object> number_v = Execution::ToNumber(value_handle, &has_exception);
if (has_exception) return Failure::Exception();
- // Restore raw pointers,
- object = *object_handle;
- value = *value_handle;
-
if (uint32_v->Number() == number_v->Number()) {
- if (object->IsJSArray()) {
- return JSArray::cast(object)->SetElementsLength(*uint32_v);
- } else {
- // This means one of the object's prototypes is a JSArray and
- // the object does not have a 'length' property.
- // Calling SetProperty causes an infinite loop.
- return object->SetLocalPropertyIgnoreAttributes(
- isolate->heap()->length_symbol(), value, NONE);
- }
+ return Handle<JSArray>::cast(object_handle)->SetElementsLength(*uint32_v);
}
return isolate->Throw(
*isolate->factory()->NewRangeError("invalid_array_length",
diff --git a/src/api.cc b/src/api.cc
index 7f68b92..3fe5621 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -5886,6 +5886,29 @@
}
+int HeapSnapshot::GetNodesCount() const {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodesCount");
+ return ToInternal(this)->entries()->length();
+#else
+ return 0;
+#endif
+}
+
+
+const HeapGraphNode* HeapSnapshot::GetNode(int index) const {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetNode");
+ return reinterpret_cast<const HeapGraphNode*>(
+ ToInternal(this)->entries()->at(index));
+#else
+ return 0;
+#endif
+}
+
+
void HeapSnapshot::Serialize(OutputStream* stream,
HeapSnapshot::SerializationFormat format) const {
#ifdef ENABLE_LOGGING_AND_PROFILING
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 1989f10..51a114f 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -303,6 +303,10 @@
const DwVfpRegister d14 = { 14 };
const DwVfpRegister d15 = { 15 };
+// Aliases for double registers.
+const DwVfpRegister kFirstCalleeSavedDoubleReg = d8;
+const DwVfpRegister kLastCalleeSavedDoubleReg = d15;
+
// Coprocessor register
struct CRegister {
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index c99c5ab..b92c25a 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -1610,15 +1610,13 @@
}
-// This stub does not handle the inlined cases (Smis, Booleans, undefined).
// The stub returns zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
// This stub uses VFP3 instructions.
CpuFeatures::Scope scope(VFP3);
- Label false_result;
- Label not_heap_number;
- Register scratch = r9.is(tos_) ? r7 : r9;
+ Label false_result, true_result, not_string;
+ const Register map = r9.is(tos_) ? r7 : r9;
// undefined -> false
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
@@ -1648,11 +1646,31 @@
__ cmp(tos_, ip);
__ b(eq, &false_result);
- // HeapNumber => false iff +0, -0, or NaN.
- __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch, ip);
- __ b(&not_heap_number, ne);
+ // Get the map of the heap object.
+ __ ldr(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
+
+ // Undetectable -> false.
+ __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ tst(ip, Operand(1 << Map::kIsUndetectable));
+ __ b(&false_result, ne);
+
+ // JavaScript object -> true.
+ __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
+ // "tos_" is a register and contains a non-zero value. Hence we implicitly
+ // return true if the greater than condition is satisfied.
+ __ Ret(ge);
+
+ // String value -> false iff empty.
+ __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
+ __ b(&not_string, ge);
+ __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset));
+ // Return string length as boolean value, i.e. return false iff length is 0.
+ __ Ret();
+
+ __ bind(&not_string);
+ // HeapNumber -> false iff +0, -0, or NaN.
+ __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ __ b(&true_result, ne);
__ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
__ VFPCompareAndSetFlags(d1, 0.0);
// "tos_" is a register, and contains a non zero value by default.
@@ -1662,41 +1680,10 @@
__ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for FP_NAN
__ Ret();
- __ bind(&not_heap_number);
-
- // It can be an undetectable object.
- // Undetectable => false.
- __ ldr(ip, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(ip, Map::kBitFieldOffset));
- __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
- __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
- __ b(&false_result, eq);
-
- // JavaScript object => true.
- __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ cmp(scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
- // "tos_" is a register and contains a non-zero value.
- // Hence we implicitly return true if the greater than
- // condition is satisfied.
- __ Ret(gt);
-
- // Check for string
- __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE));
- // "tos_" is a register and contains a non-zero value.
- // Hence we implicitly return true if the greater than
- // condition is satisfied.
- __ Ret(gt);
-
- // String value => false iff empty, i.e., length is zero
- __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset));
- // If length is zero, "tos_" contains zero ==> false.
- // If length is not zero, "tos_" contains a non-zero value ==> true.
+ // Return 1/0 for true/false in tos_.
+ __ bind(&true_result);
+ __ mov(tos_, Operand(1, RelocInfo::NONE));
__ Ret();
-
- // Return 0 in "tos_" for false .
__ bind(&false_result);
__ mov(tos_, Operand(0, RelocInfo::NONE));
__ Ret();
@@ -3550,12 +3537,24 @@
// Save callee-saved registers (incl. cp and fp), sp, and lr
__ stm(db_w, sp, kCalleeSaved | lr.bit());
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // Save callee-saved vfp registers.
+ __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
+ }
+
// Get address of argv, see stm above.
// r0: code entry
// r1: function
// r2: receiver
// r3: argc
- __ ldr(r4, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize)); // argv
+
+ // Setup argv in r4.
+ int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
+ if (CpuFeatures::IsSupported(VFP3)) {
+ offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
+ }
+ __ ldr(r4, MemOperand(sp, offset_to_argv));
// Push a frame with special values setup to mark it as an entry frame.
// r0: code entry
@@ -3680,6 +3679,13 @@
__ mov(lr, Operand(pc));
}
#endif
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // Restore callee-saved vfp registers.
+ __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
+ }
+
__ ldm(ia_w, sp, kCalleeSaved | pc.bit());
}
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index befeac1..8e3e9dc 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -58,19 +58,6 @@
};
-class ToBooleanStub: public CodeStub {
- public:
- explicit ToBooleanStub(Register tos) : tos_(tos) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Register tos_;
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return tos_.code(); }
-};
-
-
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h
index d6846c8..84e108b 100644
--- a/src/arm/frames-arm.h
+++ b/src/arm/frames-arm.h
@@ -72,6 +72,9 @@
static const int kNumCalleeSaved = 7 + kR9Available;
+// Double registers d8 to d15 are callee-saved.
+static const int kNumDoubleCalleeSaved = 8;
+
// Number of registers for which space is reserved in safepoints. Must be a
// multiple of 8.
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index f6aa534..f530428 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -1103,13 +1103,9 @@
ASSERT(compare->value()->representation().IsTagged());
LOperand* temp = TempRegister();
return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()), temp);
- } else if (v->IsCompareJSObjectEq()) {
- HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
- return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
- UseRegisterAtStart(compare->right()));
- } else if (v->IsCompareSymbolEq()) {
- HCompareSymbolEq* compare = HCompareSymbolEq::cast(v);
- return new LCmpSymbolEqAndBranch(UseRegisterAtStart(compare->left()),
+ } else if (v->IsCompareObjectEq()) {
+ HCompareObjectEq* compare = HCompareObjectEq::cast(v);
+ return new LCmpObjectEqAndBranch(UseRegisterAtStart(compare->left()),
UseRegisterAtStart(compare->right()));
} else if (v->IsCompareConstantEq()) {
HCompareConstantEq* compare = HCompareConstantEq::cast(v);
@@ -1504,20 +1500,10 @@
}
-LInstruction* LChunkBuilder::DoCompareJSObjectEq(
- HCompareJSObjectEq* instr) {
+LInstruction* LChunkBuilder::DoCompareObjectEq(HCompareObjectEq* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareSymbolEq(
- HCompareSymbolEq* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LCmpSymbolEq* result = new LCmpSymbolEq(left, right);
+ LCmpObjectEq* result = new LCmpObjectEq(left, right);
return DefineAsRegister(result);
}
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 67483e4..a250bed 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -83,11 +83,9 @@
V(CmpConstantEqAndBranch) \
V(CmpID) \
V(CmpIDAndBranch) \
- V(CmpJSObjectEq) \
- V(CmpJSObjectEqAndBranch) \
+ V(CmpObjectEq) \
+ V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
- V(CmpSymbolEq) \
- V(CmpSymbolEqAndBranch) \
V(CmpT) \
V(ConstantD) \
V(ConstantI) \
@@ -637,48 +635,26 @@
};
-class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> {
+class LCmpObjectEq: public LTemplateInstruction<1, 2, 0> {
public:
- LCmpJSObjectEq(LOperand* left, LOperand* right) {
+ LCmpObjectEq(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEq, "cmp-object-eq")
};
-class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
- LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
+ LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
- "cmp-jsobject-eq-and-branch")
-};
-
-
-class LCmpSymbolEq: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpSymbolEq(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpSymbolEq, "cmp-symbol-eq")
-};
-
-
-class LCmpSymbolEqAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpSymbolEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpSymbolEqAndBranch, "cmp-symbol-eq-and-branch")
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
+ "cmp-object-eq-and-branch")
};
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 04712dc..46e0754 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -1714,7 +1714,7 @@
}
-void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
+void LCodeGen::DoCmpObjectEq(LCmpObjectEq* instr) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
Register result = ToRegister(instr->result());
@@ -1725,29 +1725,7 @@
}
-void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
-
- __ cmp(left, Operand(right));
- EmitBranch(true_block, false_block, eq);
-}
-
-
-void LCodeGen::DoCmpSymbolEq(LCmpSymbolEq* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- Register result = ToRegister(instr->result());
-
- __ cmp(left, Operand(right));
- __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
- __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
-}
-
-
-void LCodeGen::DoCmpSymbolEqAndBranch(LCmpSymbolEqAndBranch* instr) {
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
int false_block = chunk_->LookupDestination(instr->false_block_id());
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 0ed3701..3a75658 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -1001,6 +1001,18 @@
};
+class ToBooleanStub: public CodeStub {
+ public:
+ explicit ToBooleanStub(Register tos) : tos_(tos) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Register tos_;
+ Major MajorKey() { return ToBoolean; }
+ int MinorKey() { return tos_.code(); }
+};
+
} } // namespace v8::internal
#endif // V8_CODE_STUBS_H_
diff --git a/src/d8.js b/src/d8.js
index 9798078..033455e 100644
--- a/src/d8.js
+++ b/src/d8.js
@@ -977,9 +977,14 @@
// specification it is considered a function break point.
pos = target.indexOf(':');
if (pos > 0) {
- type = 'script';
var tmp = target.substring(pos + 1, target.length);
target = target.substring(0, pos);
+ if (target[0] == '/' && target[target.length - 1] == '/') {
+ type = 'scriptRegExp';
+ target = target.substring(1, target.length - 1);
+ } else {
+ type = 'script';
+ }
// Check for both line and column.
pos = tmp.indexOf(':');
@@ -1984,6 +1989,9 @@
if (breakpoint.script_name) {
result += ' script_name=' + breakpoint.script_name;
}
+ if (breakpoint.script_regexp) {
+ result += ' script_regexp=' + breakpoint.script_regexp;
+ }
result += ' line=' + (breakpoint.line + 1);
if (breakpoint.column != null) {
result += ' column=' + (breakpoint.column + 1);
diff --git a/src/debug-debugger.js b/src/debug-debugger.js
index 908fcd2..c632e47 100644
--- a/src/debug-debugger.js
+++ b/src/debug-debugger.js
@@ -68,7 +68,8 @@
// The different script break point types.
Debug.ScriptBreakPointType = { ScriptId: 0,
- ScriptName: 1 };
+ ScriptName: 1,
+ ScriptRegExp: 2 };
function ScriptTypeFlag(type) {
return (1 << type);
@@ -255,8 +256,12 @@
this.type_ = type;
if (type == Debug.ScriptBreakPointType.ScriptId) {
this.script_id_ = script_id_or_name;
- } else { // type == Debug.ScriptBreakPointType.ScriptName
+ } else if (type == Debug.ScriptBreakPointType.ScriptName) {
this.script_name_ = script_id_or_name;
+ } else if (type == Debug.ScriptBreakPointType.ScriptRegExp) {
+ this.script_regexp_object_ = new RegExp(script_id_or_name);
+ } else {
+ throw new Error("Unexpected breakpoint type " + type);
}
this.line_ = opt_line || 0;
this.column_ = opt_column;
@@ -309,6 +314,11 @@
};
+ScriptBreakPoint.prototype.script_regexp_object = function() {
+ return this.script_regexp_object_;
+};
+
+
ScriptBreakPoint.prototype.line = function() {
return this.line_;
};
@@ -384,10 +394,19 @@
ScriptBreakPoint.prototype.matchesScript = function(script) {
if (this.type_ == Debug.ScriptBreakPointType.ScriptId) {
return this.script_id_ == script.id;
- } else { // this.type_ == Debug.ScriptBreakPointType.ScriptName
- return this.script_name_ == script.nameOrSourceURL() &&
- script.line_offset <= this.line_ &&
- this.line_ < script.line_offset + script.lineCount();
+ } else {
+ // We might want to account columns here as well.
+ if (!(script.line_offset <= this.line_ &&
+ this.line_ < script.line_offset + script.lineCount())) {
+ return false;
+ }
+ if (this.type_ == Debug.ScriptBreakPointType.ScriptName) {
+ return this.script_name_ == script.nameOrSourceURL();
+ } else if (this.type_ == Debug.ScriptBreakPointType.ScriptRegExp) {
+ return this.script_regexp_object_.test(script.nameOrSourceURL());
+ } else {
+ throw new Error("Unexpected breakpoint type " + this.type_);
+ }
}
};
@@ -431,7 +450,8 @@
}
var actual_location = script.locationFromPosition(actual_position, true);
break_point.actual_location = { line: actual_location.line,
- column: actual_location.column };
+ column: actual_location.column,
+ script_id: script.id };
this.break_points_.push(break_point);
return break_point;
};
@@ -644,7 +664,8 @@
actual_position += this.sourcePosition(func);
var actual_location = script.locationFromPosition(actual_position, true);
break_point.actual_location = { line: actual_location.line,
- column: actual_location.column };
+ column: actual_location.column,
+ script_id: script.id };
break_point.setCondition(opt_condition);
return break_point.number();
}
@@ -799,6 +820,15 @@
}
+Debug.setScriptBreakPointByRegExp = function(script_regexp,
+ opt_line, opt_column,
+ opt_condition, opt_groupId) {
+ return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptRegExp,
+ script_regexp, opt_line, opt_column,
+ opt_condition, opt_groupId);
+}
+
+
Debug.enableScriptBreakPoint = function(break_point_number) {
var script_break_point = this.findScriptBreakPoint(break_point_number, false);
script_break_point.enable();
@@ -1549,12 +1579,7 @@
response.failed('Missing argument "type" or "target"');
return;
}
- if (type != 'function' && type != 'handle' &&
- type != 'script' && type != 'scriptId') {
- response.failed('Illegal type "' + type + '"');
- return;
- }
-
+
// Either function or script break point.
var break_point_number;
if (type == 'function') {
@@ -1598,9 +1623,16 @@
break_point_number =
Debug.setScriptBreakPointByName(target, line, column, condition,
groupId);
- } else { // type == 'scriptId.
+ } else if (type == 'scriptId') {
break_point_number =
Debug.setScriptBreakPointById(target, line, column, condition, groupId);
+ } else if (type == 'scriptRegExp') {
+ break_point_number =
+ Debug.setScriptBreakPointByRegExp(target, line, column, condition,
+ groupId);
+ } else {
+ response.failed('Illegal type "' + type + '"');
+ return;
}
// Set additional break point properties.
@@ -1621,9 +1653,14 @@
if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) {
response.body.type = 'scriptId';
response.body.script_id = break_point.script_id();
- } else {
+ } else if (break_point.type() == Debug.ScriptBreakPointType.ScriptName) {
response.body.type = 'scriptName';
response.body.script_name = break_point.script_name();
+ } else if (break_point.type() == Debug.ScriptBreakPointType.ScriptRegExp) {
+ response.body.type = 'scriptRegExp';
+ response.body.script_regexp = break_point.script_regexp_object().source;
+ } else {
+ throw new Error("Internal error: Unexpected breakpoint type: " + break_point.type());
}
response.body.line = break_point.line();
response.body.column = break_point.column();
@@ -1753,9 +1790,14 @@
if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) {
description.type = 'scriptId';
description.script_id = break_point.script_id();
- } else {
+ } else if (break_point.type() == Debug.ScriptBreakPointType.ScriptName) {
description.type = 'scriptName';
description.script_name = break_point.script_name();
+ } else if (break_point.type() == Debug.ScriptBreakPointType.ScriptRegExp) {
+ description.type = 'scriptRegExp';
+ description.script_regexp = break_point.script_regexp_object().source;
+ } else {
+ throw new Error("Internal error: Unexpected breakpoint type: " + break_point.type());
}
array.push(description);
}
diff --git a/src/heap.cc b/src/heap.cc
index 90847cf..58ad372 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -153,6 +153,15 @@
max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif
+ intptr_t max_virtual = OS::MaxVirtualMemory();
+
+ if (max_virtual > 0) {
+ if (code_range_size_ > 0) {
+ // Reserve no more than 1/8 of the memory for the code range.
+ code_range_size_ = Min(code_range_size_, max_virtual >> 3);
+ }
+ }
+
memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
global_contexts_list_ = NULL;
mark_compact_collector_.heap_ = this;
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 9f9becb..771770e 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -1571,7 +1571,7 @@
}
-HType HCompareJSObjectEq::CalculateInferredType() {
+HType HCompareObjectEq::CalculateInferredType() {
return HType::Boolean();
}
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index f2ca730..01898a3 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -91,9 +91,8 @@
V(ClampToUint8) \
V(ClassOfTest) \
V(Compare) \
- V(CompareJSObjectEq) \
+ V(CompareObjectEq) \
V(CompareMap) \
- V(CompareSymbolEq) \
V(CompareConstantEq) \
V(Constant) \
V(Context) \
@@ -2555,9 +2554,9 @@
};
-class HCompareJSObjectEq: public HBinaryOperation {
+class HCompareObjectEq: public HBinaryOperation {
public:
- HCompareJSObjectEq(HValue* left, HValue* right)
+ HCompareObjectEq(HValue* left, HValue* right)
: HBinaryOperation(left, right) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
@@ -2573,47 +2572,13 @@
}
virtual HType CalculateInferredType();
- DECLARE_CONCRETE_INSTRUCTION(CompareJSObjectEq)
+ DECLARE_CONCRETE_INSTRUCTION(CompareObjectEq)
protected:
virtual bool DataEquals(HValue* other) { return true; }
};
-class HCompareSymbolEq: public HBinaryOperation {
- public:
- HCompareSymbolEq(HValue* left, HValue* right, Token::Value op)
- : HBinaryOperation(left, right), op_(op) {
- ASSERT(op == Token::EQ || op == Token::EQ_STRICT);
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetFlag(kDependsOnMaps);
- }
-
- Token::Value op() const { return op_; }
-
- virtual bool EmitAtUses() {
- return !HasSideEffects() && !HasMultipleUses();
- }
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
-
- virtual HType CalculateInferredType() { return HType::Boolean(); }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareSymbolEq);
-
- protected:
- virtual bool DataEquals(HValue* other) {
- return op_ == HCompareSymbolEq::cast(other)->op_;
- }
-
- private:
- const Token::Value op_;
-};
-
-
class HCompareConstantEq: public HUnaryOperation {
public:
HCompareConstantEq(HValue* left, int right, Token::Value op)
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 7550809..13e0e16 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -5237,18 +5237,6 @@
}
-HCompareSymbolEq* HGraphBuilder::BuildSymbolCompare(HValue* left,
- HValue* right,
- Token::Value op) {
- ASSERT(op == Token::EQ || op == Token::EQ_STRICT);
- AddInstruction(new(zone()) HCheckNonSmi(left));
- AddInstruction(HCheckInstanceType::NewIsSymbol(left));
- AddInstruction(new(zone()) HCheckNonSmi(right));
- AddInstruction(HCheckInstanceType::NewIsSymbol(right));
- return new(zone()) HCompareSymbolEq(left, right, op);
-}
-
-
HStringCharCodeAt* HGraphBuilder::BuildStringCharCodeAt(HValue* string,
HValue* index) {
AddInstruction(new(zone()) HCheckNonSmi(string));
@@ -5594,7 +5582,7 @@
AddInstruction(HCheckInstanceType::NewIsSpecObject(left));
AddInstruction(new(zone()) HCheckNonSmi(right));
AddInstruction(HCheckInstanceType::NewIsSpecObject(right));
- instr = new(zone()) HCompareJSObjectEq(left, right);
+ instr = new(zone()) HCompareObjectEq(left, right);
break;
}
default:
@@ -5603,7 +5591,11 @@
}
} else if (type_info.IsString() && oracle()->IsSymbolCompare(expr) &&
(op == Token::EQ || op == Token::EQ_STRICT)) {
- instr = BuildSymbolCompare(left, right, op);
+ AddInstruction(new(zone()) HCheckNonSmi(left));
+ AddInstruction(HCheckInstanceType::NewIsSymbol(left));
+ AddInstruction(new(zone()) HCheckNonSmi(right));
+ AddInstruction(HCheckInstanceType::NewIsSymbol(right));
+ instr = new(zone()) HCompareObjectEq(left, right);
} else {
HCompare* compare = new(zone()) HCompare(left, right, op);
Representation r = ToRepresentation(type_info);
@@ -5849,7 +5841,7 @@
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* right = Pop();
HValue* left = Pop();
- HCompareJSObjectEq* result = new(zone()) HCompareJSObjectEq(left, right);
+ HCompareObjectEq* result = new(zone()) HCompareObjectEq(left, right);
ast_context()->ReturnInstruction(result, call->id());
}
diff --git a/src/hydrogen.h b/src/hydrogen.h
index c2339e3..78490db 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -882,9 +882,6 @@
ZoneMapList* types,
Handle<String> name);
- HCompareSymbolEq* BuildSymbolCompare(HValue* left,
- HValue* right,
- Token::Value op);
HStringCharCodeAt* BuildStringCharCodeAt(HValue* string,
HValue* index);
HInstruction* BuildBinaryOperation(BinaryOperation* expr,
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 27dbe21..8fd272b 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -237,55 +237,55 @@
}
-// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
+// The stub returns zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
Label false_result, true_result, not_string;
- __ mov(eax, Operand(esp, 1 * kPointerSize));
Factory* factory = masm->isolate()->factory();
+ const Register map = edx;
+
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
// undefined -> false
__ cmp(eax, factory->undefined_value());
__ j(equal, &false_result);
// Boolean -> its value
- __ cmp(eax, factory->true_value());
- __ j(equal, &true_result);
__ cmp(eax, factory->false_value());
__ j(equal, &false_result);
+ __ cmp(eax, factory->true_value());
+ __ j(equal, &true_result);
// Smis: 0 -> false, all other -> true
__ test(eax, Operand(eax));
__ j(zero, &false_result);
__ JumpIfSmi(eax, &true_result);
- // 'null' => false.
+ // 'null' -> false.
__ cmp(eax, factory->null_value());
__ j(equal, &false_result, Label::kNear);
- // Get the map and type of the heap object.
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
+ // Get the map of the heap object.
+ __ mov(map, FieldOperand(eax, HeapObject::kMapOffset));
- // Undetectable => false.
- __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
+ // Undetectable -> false.
+ __ test_b(FieldOperand(map, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
__ j(not_zero, &false_result, Label::kNear);
- // JavaScript object => true.
- __ CmpInstanceType(edx, FIRST_SPEC_OBJECT_TYPE);
+ // JavaScript object -> true.
+ __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
__ j(above_equal, &true_result, Label::kNear);
- // String value => false iff empty.
- __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE);
+ // String value -> false iff empty.
+ __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
__ j(above_equal, &not_string, Label::kNear);
- STATIC_ASSERT(kSmiTag == 0);
__ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0));
__ j(zero, &false_result, Label::kNear);
__ jmp(&true_result, Label::kNear);
__ bind(&not_string);
- // HeapNumber => false iff +0, -0, or NaN.
- __ cmp(edx, factory->heap_number_map());
+ // HeapNumber -> false iff +0, -0, or NaN.
+ __ cmp(map, factory->heap_number_map());
__ j(not_equal, &true_result, Label::kNear);
__ fldz();
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
@@ -293,12 +293,12 @@
__ j(zero, &false_result, Label::kNear);
// Fall through to |true_result|.
- // Return 1/0 for true/false in eax.
+ // Return 1/0 for true/false in tos_.
__ bind(&true_result);
- __ mov(eax, 1);
+ __ mov(tos_, 1);
__ ret(1 * kPointerSize);
__ bind(&false_result);
- __ mov(eax, 0);
+ __ mov(tos_, 0);
__ ret(1 * kPointerSize);
}
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index ead7761..d51549d 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -60,18 +60,6 @@
};
-class ToBooleanStub: public CodeStub {
- public:
- ToBooleanStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return 0; }
-};
-
-
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index f46a30a..02fa2c2 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -566,10 +566,10 @@
Label* if_true,
Label* if_false,
Label* fall_through) {
- ToBooleanStub stub;
+ ToBooleanStub stub(result_register());
__ push(result_register());
__ CallStub(&stub);
- __ test(eax, Operand(eax));
+ __ test(result_register(), Operand(result_register()));
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
}
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 93ed553..d427710 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -1411,7 +1411,7 @@
// The conversion stub doesn't cause garbage collections so it's
// safe to not record a safepoint after the call.
__ bind(&call_stub);
- ToBooleanStub stub;
+ ToBooleanStub stub(eax);
__ pushad();
__ push(reg);
__ CallStub(&stub);
@@ -1548,7 +1548,7 @@
}
-void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
+void LCodeGen::DoCmpObjectEq(LCmpObjectEq* instr) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
Register result = ToRegister(instr->result());
@@ -1562,32 +1562,7 @@
}
-void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
-
- __ cmp(left, Operand(right));
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::DoCmpSymbolEq(LCmpSymbolEq* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- Register result = ToRegister(instr->result());
-
- Label done;
- __ cmp(left, Operand(right));
- __ mov(result, factory()->false_value());
- __ j(not_equal, &done, Label::kNear);
- __ mov(result, factory()->true_value());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCmpSymbolEqAndBranch(LCmpSymbolEqAndBranch* instr) {
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
int false_block = chunk_->LookupDestination(instr->false_block_id());
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 590d943..6f2f8d6 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -1106,13 +1106,9 @@
return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()),
temp1,
temp2);
- } else if (v->IsCompareJSObjectEq()) {
- HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
- return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
- UseRegisterAtStart(compare->right()));
- } else if (v->IsCompareSymbolEq()) {
- HCompareSymbolEq* compare = HCompareSymbolEq::cast(v);
- return new LCmpSymbolEqAndBranch(UseRegisterAtStart(compare->left()),
+ } else if (v->IsCompareObjectEq()) {
+ HCompareObjectEq* compare = HCompareObjectEq::cast(v);
+ return new LCmpObjectEqAndBranch(UseRegisterAtStart(compare->left()),
UseRegisterAtStart(compare->right()));
} else if (v->IsCompareConstantEq()) {
HCompareConstantEq* compare = HCompareConstantEq::cast(v);
@@ -1528,20 +1524,10 @@
}
-LInstruction* LChunkBuilder::DoCompareJSObjectEq(
- HCompareJSObjectEq* instr) {
+LInstruction* LChunkBuilder::DoCompareObjectEq(HCompareObjectEq* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareSymbolEq(
- HCompareSymbolEq* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LCmpSymbolEq* result = new LCmpSymbolEq(left, right);
+ LCmpObjectEq* result = new LCmpObjectEq(left, right);
return DefineAsRegister(result);
}
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 6878d6a..41f41c7 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -75,11 +75,9 @@
V(ClassOfTestAndBranch) \
V(CmpID) \
V(CmpIDAndBranch) \
- V(CmpJSObjectEq) \
- V(CmpJSObjectEqAndBranch) \
+ V(CmpObjectEq) \
+ V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
- V(CmpSymbolEq) \
- V(CmpSymbolEqAndBranch) \
V(CmpT) \
V(CmpConstantEq) \
V(CmpConstantEqAndBranch) \
@@ -622,48 +620,26 @@
};
-class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> {
+class LCmpObjectEq: public LTemplateInstruction<1, 2, 0> {
public:
- LCmpJSObjectEq(LOperand* left, LOperand* right) {
+ LCmpObjectEq(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEq, "cmp-object-eq")
};
-class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
- LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
+ LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
- "cmp-jsobject-eq-and-branch")
-};
-
-
-class LCmpSymbolEq: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpSymbolEq(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpSymbolEq, "cmp-symbol-eq")
-};
-
-
-class LCmpSymbolEqAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpSymbolEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpSymbolEqAndBranch, "cmp-symbol-eq-and-branch")
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
+ "cmp-object-eq-and-branch")
};
diff --git a/src/ic.cc b/src/ic.cc
index 33b89e0..542466d 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -860,37 +860,40 @@
}
if (FLAG_use_ic) {
- Code* non_monomorphic_stub =
- (state == UNINITIALIZED) ? pre_monomorphic_stub() : megamorphic_stub();
-
// Use specialized code for getting the length of strings and
// string wrapper objects. The length property of string wrapper
// objects is read-only and therefore always returns the length of
// the underlying string value. See ECMA-262 15.5.5.1.
if ((object->IsString() || object->IsStringWrapper()) &&
name->Equals(isolate()->heap()->length_symbol())) {
- HandleScope scope(isolate());
-#ifdef DEBUG
- if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
-#endif
- if (state == PREMONOMORPHIC) {
+ AssertNoAllocation no_allocation;
+ Code* stub = NULL;
+ if (state == UNINITIALIZED) {
+ stub = pre_monomorphic_stub();
+ } else if (state == PREMONOMORPHIC) {
if (object->IsString()) {
- set_target(isolate()->builtins()->builtin(
- Builtins::kLoadIC_StringLength));
+ stub = isolate()->builtins()->builtin(
+ Builtins::kLoadIC_StringLength);
} else {
- set_target(isolate()->builtins()->builtin(
- Builtins::kLoadIC_StringWrapperLength));
+ stub = isolate()->builtins()->builtin(
+ Builtins::kLoadIC_StringWrapperLength);
}
} else if (state == MONOMORPHIC && object->IsStringWrapper()) {
- set_target(isolate()->builtins()->builtin(
- Builtins::kLoadIC_StringWrapperLength));
- } else {
- set_target(non_monomorphic_stub);
+ stub = isolate()->builtins()->builtin(
+ Builtins::kLoadIC_StringWrapperLength);
+ } else if (state != MEGAMORPHIC) {
+ stub = megamorphic_stub();
+ }
+ if (stub != NULL) {
+ set_target(stub);
+#ifdef DEBUG
+ if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
+#endif
}
// Get the string if we have a string wrapper object.
if (object->IsJSValue()) {
- object = Handle<Object>(Handle<JSValue>::cast(object)->value(),
- isolate());
+ return Smi::FromInt(
+ String::cast(Handle<JSValue>::cast(object)->value())->length());
}
return Smi::FromInt(String::cast(*object)->length());
}
@@ -898,14 +901,21 @@
// Use specialized code for getting the length of arrays.
if (object->IsJSArray() &&
name->Equals(isolate()->heap()->length_symbol())) {
+ AssertNoAllocation no_allocation;
+ Code* stub = NULL;
+ if (state == UNINITIALIZED) {
+ stub = pre_monomorphic_stub();
+ } else if (state == PREMONOMORPHIC) {
+ stub = isolate()->builtins()->builtin(
+ Builtins::kLoadIC_ArrayLength);
+ } else if (state != MEGAMORPHIC) {
+ stub = megamorphic_stub();
+ }
+ if (stub != NULL) {
+ set_target(stub);
#ifdef DEBUG
- if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n");
+ if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n");
#endif
- if (state == PREMONOMORPHIC) {
- set_target(isolate()->builtins()->builtin(
- Builtins::kLoadIC_ArrayLength));
- } else {
- set_target(non_monomorphic_stub);
}
return JSArray::cast(*object)->length();
}
@@ -914,14 +924,22 @@
if (object->IsJSFunction() &&
name->Equals(isolate()->heap()->prototype_symbol()) &&
JSFunction::cast(*object)->should_have_prototype()) {
+ { AssertNoAllocation no_allocation;
+ Code* stub = NULL;
+ if (state == UNINITIALIZED) {
+ stub = pre_monomorphic_stub();
+ } else if (state == PREMONOMORPHIC) {
+ stub = isolate()->builtins()->builtin(
+ Builtins::kLoadIC_FunctionPrototype);
+ } else if (state != MEGAMORPHIC) {
+ stub = megamorphic_stub();
+ }
+ if (stub != NULL) {
+ set_target(stub);
#ifdef DEBUG
- if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
+ if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
#endif
- if (state == PREMONOMORPHIC) {
- set_target(isolate()->builtins()->builtin(
- Builtins::kLoadIC_FunctionPrototype));
- } else {
- set_target(non_monomorphic_stub);
+ }
}
return Accessors::FunctionGetPrototype(*object, 0);
}
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 0ce551d..1aa1838 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -1718,7 +1718,6 @@
}
-// This stub does not handle the inlined cases (Smis, Booleans, undefined).
// The stub returns zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
// This stub uses FPU instructions.
@@ -1782,7 +1781,7 @@
// "tos_" is a register and contains a non-zero value.
// Hence we implicitly return true if the greater than
// condition is satisfied.
- __ Ret(gt, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Ret(ge, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
// Check for string.
__ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
@@ -1790,7 +1789,7 @@
// "tos_" is a register and contains a non-zero value.
// Hence we implicitly return true if the greater than
// condition is satisfied.
- __ Ret(gt, scratch0, Operand(FIRST_NONSTRING_TYPE));
+ __ Ret(ge, scratch0, Operand(FIRST_NONSTRING_TYPE));
// String value => false iff empty, i.e., length is zero.
__ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
@@ -2789,9 +2788,11 @@
// DIV just falls through to allocating a heap number.
}
+ __ bind(&return_heap_number);
+ // Return a heap number, or fall through to type transition or runtime
+ // call if we can't.
if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
: BinaryOpIC::INT32)) {
- __ bind(&return_heap_number);
// We are using FPU registers so s0 is available.
heap_number_result = s0;
GenerateHeapResultAllocation(masm,
@@ -2970,7 +2971,11 @@
UNREACHABLE();
}
- if (transition.is_linked()) {
+ // We never expect DIV to yield an integer result, so we always generate
+ // type transition code for DIV operations expecting an integer result: the
+ // code will fall through to this type transition.
+ if (transition.is_linked() ||
+ ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
__ bind(&transition);
GenerateTypeTransition(masm);
}
@@ -3542,15 +3547,10 @@
__ li(a2, Operand(ExternalReference::isolate_address()));
- // From arm version of this function:
- // TODO(1242173): To let the GC traverse the return address of the exit
- // frames, we need to know where the return address is. Right now,
- // we push it on the stack to be able to find it again, but we never
- // restore from it in case of changes, which makes it impossible to
- // support moving the C entry code stub. This should be fixed, but currently
- // this is OK because the CEntryStub gets generated so early in the V8 boot
- // sequence that it is not moving ever.
-
+ // To let the GC traverse the return address of the exit frames, we need to
+ // know where the return address is. The CEntryStub is unmovable, so
+ // we can store the address on the stack to be able to find it again and
+ // we never have to restore it, because it will not change.
{ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
// This branch-and-link sequence is needed to find the current PC on mips,
// saved to the ra register.
@@ -4075,11 +4075,252 @@
}
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
// sp[0] : number of parameters
// sp[4] : receiver displacement
// sp[8] : function
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
+ __ Branch(&runtime, ne,
+ a2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ // Patch the arguments.length and the parameters pointer in the current frame.
+ __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ sw(a2, MemOperand(sp, 0 * kPointerSize));
+ __ sll(t3, a2, 1);
+ __ Addu(a3, a3, Operand(t3));
+ __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
+ __ sw(a3, MemOperand(sp, 1 * kPointerSize));
+
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+ // Stack layout:
+ // sp[0] : number of parameters (tagged)
+ // sp[4] : address of receiver argument
+ // sp[8] : function
+ // Registers used over whole function:
+ // t2 : allocated object (tagged)
+ // t5 : mapped parameter count (tagged)
+
+ __ lw(a1, MemOperand(sp, 0 * kPointerSize));
+ // a1 = parameter count (tagged)
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ Label adaptor_frame, try_allocate;
+ __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
+ __ Branch(&adaptor_frame, eq, a2,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // No adaptor, parameter count = argument count.
+ __ mov(a2, a1);
+ __ b(&try_allocate);
+ __ nop(); // Branch delay slot nop.
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ sll(t6, a2, 1);
+ __ Addu(a3, a3, Operand(t6));
+ __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ sw(a3, MemOperand(sp, 1 * kPointerSize));
+
+ // a1 = parameter count (tagged)
+ // a2 = argument count (tagged)
+ // Compute the mapped parameter count = min(a1, a2) in a1.
+ Label skip_min;
+ __ Branch(&skip_min, lt, a1, Operand(a2));
+ __ mov(a1, a2);
+ __ bind(&skip_min);
+
+ __ bind(&try_allocate);
+
+ // Compute the sizes of backing store, parameter map, and arguments object.
+ // 1. Parameter map, has 2 extra words containing context and backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ // If there are no mapped parameters, we do not need the parameter_map.
+ Label param_map_size;
+ ASSERT_EQ(0, Smi::FromInt(0));
+ __ Branch(USE_DELAY_SLOT, ¶m_map_size, eq, a1, Operand(zero_reg));
+ __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
+ __ sll(t5, a1, 1);
+ __ addiu(t5, t5, kParameterMapHeaderSize);
+ __ bind(¶m_map_size);
+
+ // 2. Backing store.
+ __ sll(t6, a2, 1);
+ __ Addu(t5, t5, Operand(t6));
+ __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
+
+ // 3. Arguments object.
+ __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));
+
+ // Do the allocation of all three objects in one go.
+ __ AllocateInNewSpace(t5, v0, a3, t0, &runtime, TAG_OBJECT);
+
+ // v0 = address of new object(s) (tagged)
+ // a2 = argument count (tagged)
+ // Get the arguments boilerplate from the current (global) context into t0.
+ const int kNormalOffset =
+ Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ const int kAliasedOffset =
+ Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
+
+ __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
+ Label skip2_ne, skip2_eq;
+ __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
+ __ lw(t0, MemOperand(t0, kNormalOffset));
+ __ bind(&skip2_ne);
+
+ __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
+ __ lw(t0, MemOperand(t0, kAliasedOffset));
+ __ bind(&skip2_eq);
+
+ // v0 = address of new object (tagged)
+ // a1 = mapped parameter count (tagged)
+ // a2 = argument count (tagged)
+ // t0 = address of boilerplate object (tagged)
+ // Copy the JS object part.
+ for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+ __ lw(a3, FieldMemOperand(t0, i));
+ __ sw(a3, FieldMemOperand(v0, i));
+ }
+
+ // Setup the callee in-object property.
+ STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+ __ lw(a3, MemOperand(sp, 2 * kPointerSize));
+ const int kCalleeOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsCalleeIndex * kPointerSize;
+ __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
+
+ // Use the length (smi tagged) and set that as an in-object property too.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ const int kLengthOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize;
+ __ sw(a2, FieldMemOperand(v0, kLengthOffset));
+
+ // Setup the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, t0 will point there, otherwise
+ // it will point to the backing store.
+ __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
+ __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+ // v0 = address of new object (tagged)
+ // a1 = mapped parameter count (tagged)
+ // a2 = argument count (tagged)
+ // t0 = address of parameter map or backing store (tagged)
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ Label skip3;
+ __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
+ // Move backing store address to a3, because it is
+ // expected there when filling in the unmapped arguments.
+ __ mov(a3, t0);
+ __ bind(&skip3);
+
+ __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
+
+ __ LoadRoot(t2, Heap::kNonStrictArgumentsElementsMapRootIndex);
+ __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
+ __ Addu(t2, a1, Operand(Smi::FromInt(2)));
+ __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
+ __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
+ __ sll(t6, a1, 1);
+ __ Addu(t2, t0, Operand(t6));
+ __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
+ __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+ // The mapped parameter thus need to get indices
+ // MIN_CONTEXT_SLOTS+parameter_count-1 ..
+ // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+ // We loop from right to left.
+ Label parameters_loop, parameters_test;
+ __ mov(t2, a1);
+ __ lw(t5, MemOperand(sp, 0 * kPointerSize));
+ __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+ __ Subu(t5, t5, Operand(a1));
+ __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
+ __ sll(t6, t2, 1);
+ __ Addu(a3, t0, Operand(t6));
+ __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
+
+ // t2 = loop variable (tagged)
+ // a1 = mapping index (tagged)
+ // a3 = address of backing store (tagged)
+ // t0 = address of parameter map (tagged)
+ // t1 = temporary scratch (a.o., for address calculation)
+ // t3 = the hole value
+ __ jmp(¶meters_test);
+
+ __ bind(¶meters_loop);
+ __ Subu(t2, t2, Operand(Smi::FromInt(1)));
+ __ sll(t1, t2, 1);
+ __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+ __ Addu(t6, t0, t1);
+ __ sw(t5, MemOperand(t6));
+ __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+ __ Addu(t6, a3, t1);
+ __ sw(t3, MemOperand(t6));
+ __ Addu(t5, t5, Operand(Smi::FromInt(1)));
+ __ bind(¶meters_test);
+ __ Branch(¶meters_loop, ne, t2, Operand(Smi::FromInt(0)));
+
+ __ bind(&skip_parameter_map);
+ // a2 = argument count (tagged)
+ // a3 = address of backing store (tagged)
+ // t1 = scratch
+ // Copy arguments header and remaining slots (if there are any).
+ __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
+ __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
+ __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
+
+ Label arguments_loop, arguments_test;
+ __ mov(t5, a1);
+ __ lw(t0, MemOperand(sp, 1 * kPointerSize));
+ __ sll(t6, t5, 1);
+ __ Subu(t0, t0, Operand(t6));
+ __ jmp(&arguments_test);
+
+ __ bind(&arguments_loop);
+ __ Subu(t0, t0, Operand(kPointerSize));
+ __ lw(t2, MemOperand(t0, 0));
+ __ sll(t6, t5, 1);
+ __ Addu(t1, a3, Operand(t6));
+ __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
+ __ Addu(t5, t5, Operand(Smi::FromInt(1)));
+
+ __ bind(&arguments_test);
+ __ Branch(&arguments_loop, lt, t5, Operand(a2));
+
+ // Return and remove the on-stack parameters.
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ // a2 = argument count (taggged)
+ __ bind(&runtime);
+ __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+ // sp[0] : number of parameters
+ // sp[4] : receiver displacement
+ // sp[8] : function
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
__ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -4112,40 +4353,31 @@
__ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
__ bind(&add_arguments_object);
- __ Addu(a1, a1, Operand(GetArgumentsObjectSize() / kPointerSize));
+ __ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
// Do the allocation of both objects in one go.
- __ AllocateInNewSpace(
- a1,
- v0,
- a2,
- a3,
- &runtime,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+ __ AllocateInNewSpace(a1,
+ v0,
+ a2,
+ a3,
+ &runtime,
+ static_cast<AllocationFlags>(TAG_OBJECT |
+ SIZE_IN_WORDS));
// Get the arguments boilerplate from the current (global) context.
__ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
- __ lw(t0, MemOperand(t0,
- Context::SlotOffset(GetArgumentsBoilerplateIndex())));
+ __ lw(t0, MemOperand(t0, Context::SlotOffset(
+ Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
// Copy the JS object part.
__ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
- if (type_ == NEW_NON_STRICT) {
- // Setup the callee in-object property.
- STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ lw(a3, MemOperand(sp, 2 * kPointerSize));
- const int kCalleeOffset = JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize;
- __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
- }
-
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
__ lw(a1, MemOperand(sp, 0 * kPointerSize));
__ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize));
+ Heap::kArgumentsLengthIndex * kPointerSize));
Label done;
__ Branch(&done, eq, a1, Operand(zero_reg));
@@ -4155,12 +4387,13 @@
// Setup the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ Addu(t0, v0, Operand(GetArgumentsObjectSize()));
+ __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
__ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
__ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
__ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
__ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
- __ srl(a1, a1, kSmiTagSize); // Untag the length for the loop.
+ // Untag the length for the loop.
+ __ srl(a1, a1, kSmiTagSize);
// Copy the fixed array slots.
Label loop;
@@ -4184,7 +4417,7 @@
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
}
diff --git a/src/mips/code-stubs-mips.h b/src/mips/code-stubs-mips.h
index c2bd225..e2323c1 100644
--- a/src/mips/code-stubs-mips.h
+++ b/src/mips/code-stubs-mips.h
@@ -59,19 +59,6 @@
};
-class ToBooleanStub: public CodeStub {
- public:
- explicit ToBooleanStub(Register tos) : tos_(tos) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Register tos_;
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return tos_.code(); }
-};
-
-
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 73b3984..1f1780e 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -238,17 +238,17 @@
// function, receiver address, parameter count.
// The stub will rewrite receiever and parameter count if the previous
// stack frame was an arguments adapter frame.
- ArgumentsAccessStub stub(
- is_strict_mode() ? ArgumentsAccessStub::NEW_STRICT
- : ArgumentsAccessStub::NEW_NON_STRICT);
+ ArgumentsAccessStub::Type type;
+ if (is_strict_mode()) {
+ type = ArgumentsAccessStub::NEW_STRICT;
+ } else if (function()->has_duplicate_parameters()) {
+ type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ } else {
+ type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ }
+ ArgumentsAccessStub stub(type);
__ CallStub(&stub);
- Variable* arguments_shadow = scope()->arguments_shadow();
- if (arguments_shadow != NULL) {
- // Duplicate the value; move-to-slot operation might clobber registers.
- __ mov(a3, v0);
- Move(arguments_shadow->AsSlot(), a3, a1, a2);
- }
Move(arguments->AsSlot(), v0, a1, a2);
}
@@ -1254,13 +1254,12 @@
void FullCodeGenerator::EmitVariableLoad(Variable* var) {
- // Four cases: non-this global variables, lookup slots, all other
- // types of slots, and parameters that rewrite to explicit property
- // accesses on the arguments object.
+ // Three cases: non-this global variables, lookup slots, and all other
+ // types of slots.
Slot* slot = var->AsSlot();
- Property* property = var->AsProperty();
+ ASSERT((var->is_global() && !var->is_this()) == (slot == NULL));
- if (var->is_global() && !var->is_this()) {
+ if (slot == NULL) {
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in a2 and the global
// object (receiver) in a0.
@@ -1270,7 +1269,7 @@
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
context()->Plug(v0);
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ } else if (slot->type() == Slot::LOOKUP) {
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -1286,7 +1285,7 @@
context()->Plug(v0);
- } else if (slot != NULL) {
+ } else {
Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
? "Context slot"
: "Stack slot");
@@ -1303,31 +1302,6 @@
} else {
context()->Plug(slot);
}
- } else {
- Comment cmnt(masm_, "Rewritten parameter");
- ASSERT_NOT_NULL(property);
- // Rewritten parameter accesses are of the form "slot[literal]".
- // Assert that the object is in a slot.
- Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
- ASSERT_NOT_NULL(object_var);
- Slot* object_slot = object_var->AsSlot();
- ASSERT_NOT_NULL(object_slot);
-
- // Load the object.
- Move(a1, object_slot);
-
- // Assert that the key is a smi.
- Literal* key_literal = property->key()->AsLiteral();
- ASSERT_NOT_NULL(key_literal);
- ASSERT(key_literal->handle()->IsSmi());
-
- // Load the key.
- __ li(a0, Operand(key_literal->handle()));
-
- // Call keyed load IC. It has arguments key and receiver in a0 and a1.
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
- context()->Plug(v0);
}
}
@@ -1571,7 +1545,7 @@
}
// Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* property = expr->target()->AsProperty();
@@ -1598,27 +1572,13 @@
case KEYED_PROPERTY:
// We need the key and receiver on both the stack and in v0 and a1.
if (expr->is_compound()) {
- if (property->is_arguments_access()) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- __ lw(v0, EmitSlotSearch(obj_proxy->var()->AsSlot(), v0));
- __ push(v0);
- __ li(v0, Operand(property->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(property->obj());
- VisitForAccumulatorValue(property->key());
- }
+ VisitForStackValue(property->obj());
+ VisitForAccumulatorValue(property->key());
__ lw(a1, MemOperand(sp, 0));
__ push(v0);
} else {
- if (property->is_arguments_access()) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- __ lw(a1, EmitSlotSearch(obj_proxy->var()->AsSlot(), v0));
- __ li(v0, Operand(property->key()->AsLiteral()->handle()));
- __ Push(a1, v0);
- } else {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- }
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
}
break;
}
@@ -1828,7 +1788,7 @@
}
// Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* prop = expr->AsProperty();
@@ -1859,20 +1819,10 @@
}
case KEYED_PROPERTY: {
__ push(result_register()); // Preserve value.
- if (prop->is_synthetic()) {
- ASSERT(prop->obj()->AsVariableProxy() != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
- { AccumulatorValueContext for_object(this);
- EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
- }
- __ mov(a2, result_register());
- __ li(a1, Operand(prop->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ mov(a1, result_register());
- __ pop(a2);
- }
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ mov(a1, result_register());
+ __ pop(a2);
__ pop(a0); // Restore value.
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
@@ -1888,8 +1838,6 @@
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
- // Left-hand sides that rewrite to explicit property accesses do not reach
- // here.
ASSERT(var != NULL);
ASSERT(var->is_global() || var->AsSlot() != NULL);
@@ -3840,7 +3788,7 @@
}
// Expression can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* prop = expr->expression()->AsProperty();
@@ -3868,15 +3816,8 @@
__ push(v0);
EmitNamedPropertyLoad(prop);
} else {
- if (prop->is_arguments_access()) {
- VariableProxy* obj_proxy = prop->obj()->AsVariableProxy();
- __ lw(v0, EmitSlotSearch(obj_proxy->var()->AsSlot(), v0));
- __ push(v0);
- __ li(v0, Operand(prop->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- }
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
__ lw(a1, MemOperand(sp, 0));
__ push(v0);
EmitKeyedPropertyLoad(prop);
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index bc837c0..63165e4 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -893,6 +893,162 @@
}
+static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
+ Register object,
+ Register key,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* unmapped_case,
+ Label* slow_case) {
+ Heap* heap = masm->isolate()->heap();
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(object, slow_case);
+
+ // Check that the key is a positive smi.
+ __ And(scratch1, key, Operand(0x8000001));
+ __ Branch(slow_case, ne, scratch1, Operand(zero_reg));
+
+ // Load the elements into scratch1 and check its map.
+ Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+ __ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
+ __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+ // Check if element is in the range of mapped arguments. If not, jump
+ // to the unmapped lookup with the parameter map in scratch1.
+ __ lw(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
+ __ Subu(scratch2, scratch2, Operand(Smi::FromInt(2)));
+ __ Branch(unmapped_case, Ugreater_equal, key, Operand(scratch2));
+
+ // Load element index and check whether it is the hole.
+ const int kOffset =
+ FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
+
+ __ li(scratch3, Operand(kPointerSize >> 1));
+ __ mul(scratch3, key, scratch3);
+ __ Addu(scratch3, scratch3, Operand(kOffset));
+
+ __ Addu(scratch2, scratch1, scratch3);
+ __ lw(scratch2, MemOperand(scratch2));
+ __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+ __ Branch(unmapped_case, eq, scratch2, Operand(scratch3));
+
+ // Load value from context and return it. We can reuse scratch1 because
+ // we do not jump to the unmapped lookup (which requires the parameter
+ // map in scratch1).
+ __ lw(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ __ li(scratch3, Operand(kPointerSize >> 1));
+ __ mul(scratch3, scratch2, scratch3);
+ __ Addu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
+ __ Addu(scratch2, scratch1, scratch3);
+ return MemOperand(scratch2);
+}
+
+
+static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+ Register key,
+ Register parameter_map,
+ Register scratch,
+ Label* slow_case) {
+ // Element is in arguments backing store, which is referenced by the
+ // second element of the parameter_map. The parameter_map register
+ // must be loaded with the parameter map of the arguments object and is
+ // overwritten.
+ const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+ Register backing_store = parameter_map;
+ __ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
+ __ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
+ __ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
+ __ li(scratch, Operand(kPointerSize >> 1));
+ __ mul(scratch, key, scratch);
+ __ Addu(scratch,
+ scratch,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Addu(scratch, backing_store, scratch);
+ return MemOperand(scratch);
+}
+
+
+void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label slow, notin;
+ MemOperand mapped_location =
+ GenerateMappedArgumentsLookup(masm, a1, a0, a2, a3, t0, ¬in, &slow);
+ __ lw(v0, mapped_location);
+ __ Ret();
+ __ bind(¬in);
+ // The unmapped lookup expects that the parameter map is in a2.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, a0, a2, a3, &slow);
+ __ lw(a2, unmapped_location);
+ __ Branch(&slow, eq, a2, Operand(a3));
+ __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
+ __ mov(v0, a2);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm, false);
+}
+
+
+void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- lr : return address
+ // -----------------------------------
+ Label slow, notin;
+ MemOperand mapped_location =
+ GenerateMappedArgumentsLookup(masm, a2, a1, a3, t0, t1, ¬in, &slow);
+ __ sw(a0, mapped_location);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0); // (In delay slot) return the value stored in v0.
+ __ bind(¬in);
+ // The unmapped lookup expects that the parameter map is in a3.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, a1, a3, t0, &slow);
+ __ sw(a0, unmapped_location);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0); // (In delay slot) return the value stored in v0.
+ __ bind(&slow);
+ GenerateMiss(masm, false);
+}
+
+
+void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
+ int argc) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- lr : return address
+ // -----------------------------------
+ Label slow, notin;
+ // Load receiver.
+ __ lw(a1, MemOperand(sp, argc * kPointerSize));
+ MemOperand mapped_location =
+ GenerateMappedArgumentsLookup(masm, a1, a2, a3, t0, t1, ¬in, &slow);
+ __ lw(a1, mapped_location);
+ GenerateFunctionTailCall(masm, argc, &slow, a3);
+ __ bind(¬in);
+ // The unmapped lookup expects that the parameter map is in a3.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, a2, a3, t0, &slow);
+ __ lw(a1, unmapped_location);
+ __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
+ __ Branch(&slow, eq, a1, Operand(a3));
+ GenerateFunctionTailCall(masm, argc, &slow, a3);
+ __ bind(&slow);
+ GenerateMiss(masm, argc);
+}
+
+
+Object* KeyedLoadIC_Miss(Arguments args);
+
+
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
// ---------- S t a t e --------------
// -- ra : return address
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 2e946f9..3e5a009 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -3408,6 +3408,7 @@
case JSObject::FAST_ELEMENTS:
case JSObject::FAST_DOUBLE_ELEMENTS:
case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
return false;
}
@@ -3502,6 +3503,7 @@
case JSObject::FAST_ELEMENTS:
case JSObject::FAST_DOUBLE_ELEMENTS:
case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3861,6 +3863,7 @@
case JSObject::FAST_ELEMENTS:
case JSObject::FAST_DOUBLE_ELEMENTS:
case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3926,6 +3929,7 @@
case JSObject::FAST_ELEMENTS:
case JSObject::FAST_DOUBLE_ELEMENTS:
case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4095,6 +4099,7 @@
case JSObject::FAST_ELEMENTS:
case JSObject::FAST_DOUBLE_ELEMENTS:
case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 69d66cd..5ebf8f8 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -2060,7 +2060,7 @@
SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
SMI_ACCESSORS(ByteArray, length, kLengthOffset)
-// TODO(jkummerow): Investigate if it's possible to s/INT/SMI/ here (and
+// TODO(1493): Investigate if it's possible to s/INT/SMI/ here (and
// subsequently unify H{Fixed,External}ArrayLength).
INT_ACCESSORS(ExternalArray, length, kLengthOffset)
diff --git a/src/parser.cc b/src/parser.cc
index 8a2258f..b04a2f9 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -4914,7 +4914,7 @@
bool allow_lazy,
ParserRecorder* recorder) {
Isolate* isolate = Isolate::Current();
- V8JavaScriptScanner scanner(isolate->unicode_cache());
+ JavaScriptScanner scanner(isolate->unicode_cache());
scanner.Initialize(source);
intptr_t stack_limit = isolate->stack_guard()->real_climit();
if (!preparser::PreParser::PreParseProgram(&scanner,
diff --git a/src/parser.h b/src/parser.h
index 1697eb7..8022018 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -466,7 +466,7 @@
void ReportMessage(const char* message, Vector<const char*> args);
bool inside_with() const { return with_nesting_level_ > 0; }
- V8JavaScriptScanner& scanner() { return scanner_; }
+ JavaScriptScanner& scanner() { return scanner_; }
Mode mode() const { return mode_; }
ScriptDataImpl* pre_data() const { return pre_data_; }
@@ -695,7 +695,7 @@
ZoneList<Handle<String> > symbol_cache_;
Handle<Script> script_;
- V8JavaScriptScanner scanner_;
+ JavaScriptScanner scanner_;
Scope* top_scope_;
int with_nesting_level_;
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index c4b0fb8..83f6c81 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -54,6 +54,18 @@
namespace v8 {
namespace internal {
+
+// Maximum size of the virtual memory. 0 means there is no artificial
+// limit.
+
+intptr_t OS::MaxVirtualMemory() {
+ struct rlimit limit;
+ int result = getrlimit(RLIMIT_DATA, &limit);
+ if (result != 0) return 0;
+ return limit.rlim_cur;
+}
+
+
// ----------------------------------------------------------------------------
// Math functions
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index f7e0bb2..b7eed47 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -44,6 +44,11 @@
namespace v8 {
namespace internal {
+intptr_t OS::MaxVirtualMemory() {
+ return 0;
+}
+
+
// Test for finite value - usually defined in math.h
int isfinite(double x) {
return _finite(x);
diff --git a/src/platform.h b/src/platform.h
index 31f727c..06d3ca4 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -288,6 +288,10 @@
// positions indicated by the members of the CpuFeature enum from globals.h
static uint64_t CpuFeaturesImpliedByPlatform();
+ // Maximum size of the virtual memory. 0 means there is no artificial
+ // limit.
+ static intptr_t MaxVirtualMemory();
+
// Returns the double constant NAN
static double nan_value();
diff --git a/src/preparser-api.cc b/src/preparser-api.cc
index 1a7402f..e0ab500 100644
--- a/src/preparser-api.cc
+++ b/src/preparser-api.cc
@@ -158,24 +158,6 @@
};
-class StandAloneJavaScriptScanner : public JavaScriptScanner {
- public:
- explicit StandAloneJavaScriptScanner(UnicodeCache* unicode_cache)
- : JavaScriptScanner(unicode_cache) { }
-
- void Initialize(UC16CharacterStream* source) {
- source_ = source;
- Init();
- // Skip initial whitespace allowing HTML comment ends just like
- // after a newline and scan first token.
- has_line_terminator_before_next_ = true;
- has_multiline_comment_before_next_ = false;
- SkipWhiteSpace();
- Scan();
- }
-};
-
-
// Functions declared by allocation.h and implemented in both api.cc (for v8)
// or here (for a stand-alone preparser).
@@ -195,7 +177,7 @@
internal::InputStreamUTF16Buffer buffer(input);
uintptr_t stack_limit = reinterpret_cast<uintptr_t>(&buffer) - max_stack;
internal::UnicodeCache unicode_cache;
- internal::StandAloneJavaScriptScanner scanner(&unicode_cache);
+ internal::JavaScriptScanner scanner(&unicode_cache);
scanner.Initialize(&buffer);
internal::CompleteParserRecorder recorder;
preparser::PreParser::PreParseResult result =
diff --git a/src/scanner-base.cc b/src/scanner-base.cc
index a55f0f4..1564e98 100644
--- a/src/scanner-base.cc
+++ b/src/scanner-base.cc
@@ -77,6 +77,18 @@
: Scanner(scanner_contants), octal_pos_(Location::invalid()) { }
+void JavaScriptScanner::Initialize(UC16CharacterStream* source) {
+ source_ = source;
+ // Need to capture identifiers in order to recognize "get" and "set"
+ // in object literals.
+ Init();
+ // Skip initial whitespace allowing HTML comment ends just like
+ // after a newline and scan first token.
+ has_line_terminator_before_next_ = true;
+ SkipWhiteSpace();
+ Scan();
+}
+
Token::Value JavaScriptScanner::Next() {
current_ = next_;
has_line_terminator_before_next_ = false;
diff --git a/src/scanner-base.h b/src/scanner-base.h
index cb279a6..b519510 100644
--- a/src/scanner-base.h
+++ b/src/scanner-base.h
@@ -471,6 +471,8 @@
explicit JavaScriptScanner(UnicodeCache* scanner_contants);
+ void Initialize(UC16CharacterStream* source);
+
// Returns the next token.
Token::Value Next();
@@ -535,7 +537,7 @@
// the current token, and before the next. Does not count newlines
// inside multiline comments.
bool has_line_terminator_before_next_;
- // Whether there is a multi-line comment that containins a
+ // Whether there is a multi-line comment that contains a
// line-terminator after the current token, and before the next.
bool has_multiline_comment_before_next_;
};
@@ -547,14 +549,20 @@
class KeywordMatcher {
// Incrementally recognize keywords.
//
-// Recognized keywords:
-// break case catch const* continue debugger* default delete do else
-// finally false for function if in instanceof native* new null
-// return switch this throw true try typeof var void while with
+// Recognized as keywords:
+// break, case, catch, const*, continue, debugger, default, delete, do,
+// else, finally, false, for, function, if, in, instanceof, new, null,
+// return, switch, this, throw, true, try, typeof, var, void, while, with.
//
-// *: Actually "future reserved keywords". These are the only ones we
-// recognize, the remaining are allowed as identifiers.
-// In ES5 strict mode, we should disallow all reserved keywords.
+// Recognized as Future Reserved Keywords (normal and strict mode):
+// class, enum, export, extends, implements, import, interface,
+// let, package, private, private, protected, public, public,
+// static, yield.
+//
+// *: Actually a "future reserved keyword". It's the only one we are
+// recognizing outside of ES5 strict mode, the remaining are allowed
+// as identifiers.
+//
public:
KeywordMatcher()
: state_(INITIAL),
diff --git a/src/scanner.cc b/src/scanner.cc
index 844db1b..5919073 100755
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -324,23 +324,4 @@
complete_ = true;
}
-
-// ----------------------------------------------------------------------------
-// V8JavaScriptScanner
-
-
-void V8JavaScriptScanner::Initialize(UC16CharacterStream* source) {
- source_ = source;
- // Need to capture identifiers in order to recognize "get" and "set"
- // in object literals.
- Init();
- // Skip initial whitespace allowing HTML comment ends just like
- // after a newline and scan first token.
- has_line_terminator_before_next_ = true;
- has_multiline_comment_before_next_ = false;
- SkipWhiteSpace();
- Scan();
-}
-
-
} } // namespace v8::internal
diff --git a/src/scanner.h b/src/scanner.h
index 804fac8..e66dd60 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -126,21 +126,6 @@
const uc16* raw_data_; // Pointer to the actual array of characters.
};
-
-// ----------------------------------------------------------------------------
-// V8JavaScriptScanner
-// JavaScript scanner getting its input from either a V8 String or a unicode
-// CharacterStream.
-
-class V8JavaScriptScanner : public JavaScriptScanner {
- public:
- explicit V8JavaScriptScanner(UnicodeCache* unicode_cache)
- : JavaScriptScanner(unicode_cache) {}
-
- void Initialize(UC16CharacterStream* source);
-};
-
-
} } // namespace v8::internal
#endif // V8_SCANNER_H_
diff --git a/src/version.cc b/src/version.cc
index 3700910..bd2ef9c 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 4
-#define BUILD_NUMBER 5
-#define PATCH_LEVEL 1
+#define BUILD_NUMBER 6
+#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 0c33ea3..e8642a5 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -231,8 +231,11 @@
}
+// The stub returns zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
Label false_result, true_result, not_string;
+ const Register map = rdx;
+
__ movq(rax, Operand(rsp, 1 * kPointerSize));
// undefined -> false
@@ -250,48 +253,45 @@
__ j(equal, &false_result);
__ JumpIfSmi(rax, &true_result);
- // 'null' => false.
+ // 'null' -> false.
__ CompareRoot(rax, Heap::kNullValueRootIndex);
__ j(equal, &false_result, Label::kNear);
- // Get the map and type of the heap object.
- // We don't use CmpObjectType because we manipulate the type field.
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset));
+ // Get the map of the heap object.
+ __ movq(map, FieldOperand(rax, HeapObject::kMapOffset));
- // Undetectable => false.
- __ movzxbq(rbx, FieldOperand(rdx, Map::kBitFieldOffset));
- __ and_(rbx, Immediate(1 << Map::kIsUndetectable));
+ // Undetectable -> false.
+ __ testb(FieldOperand(map, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, &false_result, Label::kNear);
- // JavaScript object => true.
- __ cmpq(rcx, Immediate(FIRST_SPEC_OBJECT_TYPE));
+ // JavaScript object -> true.
+ __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
__ j(above_equal, &true_result, Label::kNear);
- // String value => false iff empty.
- __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
+ // String value -> false iff empty.
+ __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
__ j(above_equal, ¬_string, Label::kNear);
- __ movq(rdx, FieldOperand(rax, String::kLengthOffset));
- __ SmiTest(rdx);
+ __ cmpq(FieldOperand(rax, String::kLengthOffset), Immediate(0));
__ j(zero, &false_result, Label::kNear);
__ jmp(&true_result, Label::kNear);
__ bind(¬_string);
- __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &true_result, Label::kNear);
- // HeapNumber => false iff +0, -0, or NaN.
+ // HeapNumber -> false iff +0, -0, or NaN.
// These three cases set the zero flag when compared to zero using ucomisd.
+ __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &true_result, Label::kNear);
__ xorps(xmm0, xmm0);
__ ucomisd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
__ j(zero, &false_result, Label::kNear);
// Fall through to |true_result|.
- // Return 1/0 for true/false in rax.
+ // Return 1/0 for true/false in tos_.
__ bind(&true_result);
- __ Set(rax, 1);
+ __ Set(tos_, 1);
__ ret(1 * kPointerSize);
__ bind(&false_result);
- __ Set(rax, 0);
+ __ Set(tos_, 0);
__ ret(1 * kPointerSize);
}
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index 2774403..a7ed91c 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -59,18 +59,6 @@
};
-class ToBooleanStub: public CodeStub {
- public:
- ToBooleanStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return 0; }
-};
-
-
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 7bb2e61..14c95bc 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -1062,11 +1062,12 @@
AppendToBuffer("movq ");
current += PrintRightXMMOperand(current);
AppendToBuffer(", %s", NameOfXMMRegister(regop));
+ } else if (opcode == 0x50) {
+ AppendToBuffer("movmskpd %s,", NameOfCPURegister(regop));
+ current += PrintRightXMMOperand(current);
} else {
const char* mnemonic = "?";
- if (opcode == 0x50) {
- mnemonic = "movmskpd";
- } else if (opcode == 0x54) {
+ if (opcode == 0x54) {
mnemonic = "andpd";
} else if (opcode == 0x56) {
mnemonic = "orpd";
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index fba6cc2..adfc72a 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -564,10 +564,10 @@
Label* if_true,
Label* if_false,
Label* fall_through) {
- ToBooleanStub stub;
+ ToBooleanStub stub(result_register());
__ push(result_register());
__ CallStub(&stub);
- __ testq(rax, rax);
+ __ testq(result_register(), result_register());
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
}
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 3a62182..435079f 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -1413,7 +1413,7 @@
// The conversion stub doesn't cause garbage collections so it's
// safe to not record a safepoint after the call.
__ bind(&call_stub);
- ToBooleanStub stub;
+ ToBooleanStub stub(rax);
__ Pushad();
__ push(reg);
__ CallStub(&stub);
@@ -1556,7 +1556,7 @@
}
-void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
+void LCodeGen::DoCmpObjectEq(LCmpObjectEq* instr) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
Register result = ToRegister(instr->result());
@@ -1572,32 +1572,7 @@
}
-void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
-
- __ cmpq(left, right);
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::DoCmpSymbolEq(LCmpSymbolEq* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- Register result = ToRegister(instr->result());
-
- Label done;
- __ cmpq(left, right);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ j(not_equal, &done, Label::kNear);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCmpSymbolEqAndBranch(LCmpSymbolEqAndBranch* instr) {
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
int false_block = chunk_->LookupDestination(instr->false_block_id());
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 138219b..c0b4657 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -1100,13 +1100,9 @@
HIsObject* compare = HIsObject::cast(v);
ASSERT(compare->value()->representation().IsTagged());
return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()));
- } else if (v->IsCompareJSObjectEq()) {
- HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
- return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
- UseRegisterAtStart(compare->right()));
- } else if (v->IsCompareSymbolEq()) {
- HCompareSymbolEq* compare = HCompareSymbolEq::cast(v);
- return new LCmpSymbolEqAndBranch(UseRegisterAtStart(compare->left()),
+ } else if (v->IsCompareObjectEq()) {
+ HCompareObjectEq* compare = HCompareObjectEq::cast(v);
+ return new LCmpObjectEqAndBranch(UseRegisterAtStart(compare->left()),
UseRegisterAtStart(compare->right()));
} else if (v->IsCompareConstantEq()) {
HCompareConstantEq* compare = HCompareConstantEq::cast(v);
@@ -1504,20 +1500,10 @@
}
-LInstruction* LChunkBuilder::DoCompareJSObjectEq(
- HCompareJSObjectEq* instr) {
+LInstruction* LChunkBuilder::DoCompareObjectEq(HCompareObjectEq* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareSymbolEq(
- HCompareSymbolEq* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LCmpSymbolEq* result = new LCmpSymbolEq(left, right);
+ LCmpObjectEq* result = new LCmpObjectEq(left, right);
return DefineAsRegister(result);
}
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index 182e0f5..56d04e8 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -83,11 +83,9 @@
V(CmpConstantEqAndBranch) \
V(CmpID) \
V(CmpIDAndBranch) \
- V(CmpJSObjectEq) \
- V(CmpJSObjectEqAndBranch) \
+ V(CmpObjectEq) \
+ V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
- V(CmpSymbolEq) \
- V(CmpSymbolEqAndBranch) \
V(CmpT) \
V(ConstantD) \
V(ConstantI) \
@@ -620,48 +618,26 @@
};
-class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> {
+class LCmpObjectEq: public LTemplateInstruction<1, 2, 0> {
public:
- LCmpJSObjectEq(LOperand* left, LOperand* right) {
+ LCmpObjectEq(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEq, "cmp-object-eq")
};
-class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
- LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
+ LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
- "cmp-jsobject-eq-and-branch")
-};
-
-
-class LCmpSymbolEq: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpSymbolEq(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpSymbolEq, "cmp-symbol-eq")
-};
-
-
-class LCmpSymbolEqAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpSymbolEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpSymbolEqAndBranch, "cmp-symbol-eq-and-branch")
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
+ "cmp-object-eq-and-branch")
};