Version 3.3.7
Updated MIPS infrastructure files.
Performance improvements and bug fixes on all platforms.
git-svn-id: http://v8.googlecode.com/svn/trunk@7897 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index f4a3121..e5fbd65 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,10 @@
+2011-05-16: Version 3.3.7
+
+ Updated MIPS infrastructure files.
+
+ Performance improvements and bug fixes on all platforms.
+
+
2011-05-11: Version 3.3.6
Updated MIPS infrastructure files.
diff --git a/include/v8-debug.h b/include/v8-debug.h
index 0bdff84..504cbfe 100755
--- a/include/v8-debug.h
+++ b/include/v8-debug.h
@@ -127,7 +127,7 @@
/**
* Get the context active when the debug event happened. Note this is not
* the current active context as the JavaScript part of the debugger is
- * running in it's own context which is entered at this point.
+ * running in its own context which is entered at this point.
*/
virtual Handle<Context> GetEventContext() const = 0;
@@ -164,12 +164,13 @@
/**
* Get the context active when the debug event happened. Note this is not
* the current active context as the JavaScript part of the debugger is
- * running in it's own context which is entered at this point.
+ * running in its own context which is entered at this point.
*/
virtual Handle<Context> GetEventContext() const = 0;
/**
- * Client data passed with the corresponding callbak whet it was registered.
+ * Client data passed with the corresponding callback when it was
+ * registered.
*/
virtual Handle<Value> GetCallbackData() const = 0;
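
A minimal sketch of an event listener using the two accessors above (the handler name and the SetDebugEventListener2 registration are assumptions about the surrounding API of this release, not part of this change):

    static void HandleDebugEvent(const v8::Debug::EventDetails& details) {
      // The context that was active when the event fired -- not the
      // debugger's own context, which has already been entered here.
      v8::Handle<v8::Context> event_context = details.GetEventContext();
      // Client data registered together with this callback, if any.
      v8::Handle<v8::Value> data = details.GetCallbackData();
      (void) event_context; (void) data;  // illustration only
    }
    // v8::Debug::SetDebugEventListener2(HandleDebugEvent);
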
@@ -310,7 +311,7 @@
* get access to information otherwise not available during normal JavaScript
* execution e.g. details on stack frames. Receiver of the function call will
 * be the debugger context global object, however this is subject to change.
- * The following example show a JavaScript function which when passed to
+ * The following example shows a JavaScript function which when passed to
* v8::Debug::Call will return the current line of JavaScript execution.
*
* \code
@@ -352,7 +353,7 @@
* 2. V8 is suspended on debug breakpoint; in this state V8 is dedicated
* to reading and processing debug messages;
 * 3. V8 is not running at all or has called some long-running C++ function;
- * by default it means that processing of all debug message will be deferred
+ * by default it means that processing of all debug messages will be deferred
* until V8 gets control again; however, embedding application may improve
* this by manually calling this method.
*
@@ -376,7 +377,7 @@
static void ProcessDebugMessages();
/**
- * Debugger is running in it's own context which is entered while debugger
+ * Debugger is running in its own context which is entered while debugger
* messages are being dispatched. This is an explicit getter for this
* debugger context. Note that the content of the debugger context is subject
* to change.
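
A hedged sketch of the manual message pumping described above; the work loop and its helper are hypothetical:

    void LongRunningWork() {
      for (int i = 0; i < 1000000; ++i) {
        DoOneStep(i);  // hypothetical unit of C++ work
        // Dispatch queued debugger commands now instead of deferring them
        // until V8 regains control.
        if ((i % 10000) == 0) v8::Debug::ProcessDebugMessages();
      }
    }
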
diff --git a/include/v8-profiler.h b/include/v8-profiler.h
index db56e26..940a35c 100644
--- a/include/v8-profiler.h
+++ b/include/v8-profiler.h
@@ -206,7 +206,7 @@
/**
* HeapSnapshotEdge represents a directed connection between heap
- * graph nodes: from retaners to retained nodes.
+ * graph nodes: from retainers to retained nodes.
*/
class V8EXPORT HeapGraphEdge {
public:
@@ -357,7 +357,7 @@
* Prepare a serialized representation of the snapshot. The result
* is written into the stream provided in chunks of specified size.
* The total length of the serialized snapshot is unknown in
- * advance, it is can be roughly equal to JS heap size (that means,
+ * advance, it can be roughly equal to JS heap size (that means,
* it can be really big - tens of megabytes).
*
* For the JSON format, heap contents are represented as an object
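
A minimal consumer sketch, assuming the v8::OutputStream interface shipped in this release (chunked delivery via WriteAsciiChunk):

    #include <cstdio>

    class FileOutputStream : public v8::OutputStream {
     public:
      explicit FileOutputStream(FILE* file) : file_(file) {}
      virtual void EndOfStream() { fflush(file_); }
      virtual WriteResult WriteAsciiChunk(char* data, int size) {
        size_t written = fwrite(data, 1, static_cast<size_t>(size), file_);
        return written == static_cast<size_t>(size) ? kContinue : kAbort;
      }
     private:
      FILE* file_;
    };
    // Usage sketch: snapshot->Serialize(&stream, v8::HeapSnapshot::kJSON);
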
diff --git a/include/v8.h b/include/v8.h
index 457633a..c01856d 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -159,7 +159,7 @@
*
* It is safe to extract the object stored in the handle by
* dereferencing the handle (for instance, to extract the Object* from
- * an Handle<Object>); the value will still be governed by a handle
+ * a Handle<Object>); the value will still be governed by a handle
* behind the scenes and the same rules apply to these values as to
* their handles.
*/
@@ -181,7 +181,7 @@
* constructor allows you to pass handles as arguments by value and
* to assign between handles. However, if you try to assign between
* incompatible handles, for instance from a Handle<String> to a
- * Handle<Number> it will cause a compiletime error. Assigning
+ * Handle<Number> it will cause a compile-time error. Assigning
* between compatible handles, for instance assigning a
* Handle<String> to a variable declared as Handle<Value>, is legal
* because String is a subclass of Value.
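
A short sketch of these rules (a fragment in the style of this header's \code examples):

    v8::HandleScope scope;
    v8::Handle<v8::String> str = v8::String::New("example");
    v8::Handle<v8::Value> val = str;      // OK: String is a subclass of Value.
    // v8::Handle<v8::Number> num = str;  // compile-time error: incompatible.
    v8::String* raw = *str;               // raw pointer; still governed by a
                                          // handle behind the scenes
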
@@ -325,7 +325,7 @@
* handles as arguments by value and to assign between persistent
* handles. However, attempting to assign between incompatible
* persistent handles, for instance from a Persistent<String> to a
- * Persistent<Number> will cause a compiletime error. Assigning
+ * Persistent<Number> will cause a compile-time error. Assigning
* between compatible persistent handles, for instance assigning a
* Persistent<String> to a variable declared as Persistent<Value>,
* is allowed as String is a subclass of Value.
@@ -371,7 +371,7 @@
/**
* Releases the storage cell referenced by this persistent handle.
* Does not remove the reference to the cell from any handles.
- * This handle's reference, and any any other references to the storage
+ * This handle's reference, and any other references to the storage
* cell remain and IsEmpty will still return false.
*/
inline void Dispose();
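
A hedged sketch of the Dispose semantics (fragment only):

    v8::Persistent<v8::Context> context = v8::Context::New();
    // ... use the context ...
    context.Dispose();  // releases the storage cell; this handle and any
                        // copies keep their reference, IsEmpty() stays false
    context.Clear();    // optional: empty the handle so it is not reused
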
@@ -775,7 +775,7 @@
Local<Array> AsArray();
/**
- * Grab a snapshot of the the current JavaScript execution stack.
+ * Grab a snapshot of the current JavaScript execution stack.
*
* \param frame_limit The maximum number of stack frames we want to capture.
* \param options Enumerates the set of things we will capture for each
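
A usage sketch, assuming the options enumerated by StackTraceOptions:

    v8::HandleScope scope;
    v8::Local<v8::StackTrace> trace = v8::StackTrace::CurrentStackTrace(
        10 /* frame_limit */, v8::StackTrace::kDetailed /* options */);
    for (int i = 0; i < trace->GetFrameCount(); ++i) {
      v8::Local<v8::StackFrame> frame = trace->GetFrame(i);
      // e.g. frame->GetLineNumber(), frame->IsEval(), frame->IsConstructor()
    }
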
@@ -834,7 +834,7 @@
bool IsEval() const;
/**
- * Returns whther or not the associated function is called as a
+ * Returns whether or not the associated function is called as a
* constructor via "new".
*/
bool IsConstructor() const;
@@ -1182,7 +1182,7 @@
* Associate an external string resource with this string by transforming it
* in place so that existing references to this string in the JavaScript heap
* will use the external string resource. The external string resource's
- * character contents needs to be equivalent to this string.
+ * character contents need to be equivalent to this string.
* Returns true if the string has been changed to be an external string.
* The string is not modified if the operation fails. See NewExternal for
* information on the lifetime of the resource.
@@ -1204,7 +1204,7 @@
* Associate an external string resource with this string by transforming it
* in place so that existing references to this string in the JavaScript heap
* will use the external string resource. The external string resource's
- * character contents needs to be equivalent to this string.
+ * character contents need to be equivalent to this string.
* Returns true if the string has been changed to be an external string.
* The string is not modified if the operation fails. See NewExternal for
* information on the lifetime of the resource.
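
A minimal sketch of an external resource whose contents match the string, as the contract above requires (the class name is illustrative):

    class StaticAsciiResource : public v8::String::ExternalAsciiStringResource {
     public:
      StaticAsciiResource(const char* data, size_t length)
          : data_(data), length_(length) {}
      virtual const char* data() const { return data_; }
      virtual size_t length() const { return length_; }
     private:
      const char* data_;
      size_t length_;
    };

    v8::Handle<v8::String> str = v8::String::New("hello");
    bool externalized = str->MakeExternal(new StaticAsciiResource("hello", 5));
    // On failure the string is left unmodified; see NewExternal for the
    // resource lifetime rules.
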
@@ -1540,8 +1540,8 @@
V8EXPORT void TurnOnAccessCheck();
/**
- * Returns the identity hash for this object. The current implemenation uses
- * a hidden property on the object to store the identity hash.
+ * Returns the identity hash for this object. The current implementation
+ * uses a hidden property on the object to store the identity hash.
*
* The return value will never be 0. Also, it is not guaranteed to be
* unique.
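
Since the hash is stable but not unique, it suits bucketing plus an explicit identity check (a hedged fragment; kBucketCount is hypothetical):

    int bucket = object->GetIdentityHash() % kBucketCount;
    // Hashes may collide: confirm identity with a handle comparison, e.g.
    // candidate->StrictEquals(object), before treating entries as the same.
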
@@ -1622,7 +1622,7 @@
Handle<Value> argv[]);
/**
- * Call an Object as a consturctor if a callback is set by the
+ * Call an Object as a constructor if a callback is set by the
* ObjectTemplate::SetCallAsFunctionHandler method.
* Note: This method behaves like the Function::NewInstance method.
*/
@@ -2243,7 +2243,7 @@
*
* \param getter The callback to invoke when getting a property.
* \param setter The callback to invoke when setting a property.
- * \param query The callback to invoke to check is an object has a property.
+ * \param query The callback to invoke to check if an object has a property.
* \param deleter The callback to invoke when deleting a property.
* \param enumerator The callback to invoke to enumerate all the indexed
* properties of an object.
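
A hedged sketch of a getter/query pair with these signatures (templ is a hypothetical Local<ObjectTemplate>):

    static v8::Handle<v8::Value> IndexedGetter(uint32_t index,
                                               const v8::AccessorInfo& info) {
      return v8::Integer::New(static_cast<int32_t>(index * 2));  // demo value
    }

    static v8::Handle<v8::Integer> IndexedQuery(uint32_t index,
                                                const v8::AccessorInfo& info) {
      // A non-empty result means the property exists; the value encodes its
      // attributes.
      if (index < 16) return v8::Integer::New(v8::None);
      return v8::Handle<v8::Integer>();
    }

    // templ->SetIndexedPropertyHandler(IndexedGetter, 0, IndexedQuery);
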
@@ -2706,7 +2706,7 @@
* This means that the fatal error handler is called and that V8 is
* terminated.
*
- * IgnoreOutOfMemoryException can be used to not treat a
+ * IgnoreOutOfMemoryException can be used to not treat an
* out-of-memory situation as a fatal error. This way, the contexts
* that did not cause the out of memory problem might be able to
* continue execution.
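
Usage is a single call early in embedder setup (hedged fragment):

    v8::V8::IgnoreOutOfMemoryException();
    // Out-of-memory is no longer treated as fatal; contexts that did not
    // cause the problem may be able to continue execution.
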
@@ -2742,7 +2742,7 @@
/**
* Adds a message listener.
*
- * The same message listener can be added more than once and it that
+ * The same message listener can be added more than once and in that
* case it will be called more than once for each message.
*/
static bool AddMessageListener(MessageCallback that,
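
A minimal listener sketch matching the MessageCallback signature:

    #include <cstdio>

    static void OnMessage(v8::Handle<v8::Message> message,
                          v8::Handle<v8::Value> data) {
      v8::String::Utf8Value text(message->Get());
      fprintf(stderr, "script message: %s\n", *text ? *text : "<n/a>");
    }

    // Registering the same listener twice would make it fire twice:
    v8::V8::AddMessageListener(OnMessage);
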
@@ -3022,7 +3022,7 @@
* The termination is achieved by throwing an exception that is
* uncatchable by JavaScript exception handlers. Termination
* exceptions act as if they were caught by a C++ TryCatch exception
- * handlers. If forceful termination is used, any C++ TryCatch
+ * handler. If forceful termination is used, any C++ TryCatch
* exception handler that catches an exception should check if that
* exception is a termination exception and immediately return if
* that is the case. Returning immediately in that case will
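
One era-typical shape for that check (a hedged sketch; script is hypothetical, and TryCatch::CanContinue() returning false is what flags a termination exception here):

    v8::TryCatch try_catch;
    v8::Handle<v8::Value> result = script->Run();
    if (result.IsEmpty() && try_catch.HasCaught() && !try_catch.CanContinue()) {
      // Termination exception: return immediately so the forced termination
      // is not cancelled by resumed JavaScript execution.
      return;
    }
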
@@ -3426,7 +3426,7 @@
* } // Destructor called here
* \endcode
*
- * If you wish to stop using V8 in a thread A you can do this by either
+ * If you wish to stop using V8 in a thread A you can do this either
* by destroying the v8::Locker object as above or by constructing a
* v8::Unlocker object:
*
@@ -3502,7 +3502,7 @@
/**
* Start preemption.
*
- * When preemption is started, a timer is fired every n milli seconds
+ * When preemption is started, a timer is fired every n milliseconds
* that will switch between multiple threads that are in contention
* for the V8 lock.
*/
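
Usage sketch:

    v8::Locker::StartPreemption(100);  // rotate contending threads every 100 ms
    // ... run JavaScript from several threads ...
    v8::Locker::StopPreemption();
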
@@ -3682,7 +3682,7 @@
static const int kFullStringRepresentationMask = 0x07;
static const int kExternalTwoByteRepresentationTag = 0x02;
- static const int kJSObjectType = 0xa1;
+ static const int kJSObjectType = 0xa2;
static const int kFirstNonstringType = 0x80;
static const int kProxyType = 0x85;
diff --git a/src/SConscript b/src/SConscript
index 5ebc1cc..011a893 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -68,7 +68,6 @@
execution.cc
factory.cc
flags.cc
- frame-element.cc
frames.cc
full-codegen.cc
func-name-inferrer.cc
@@ -122,7 +121,6 @@
strtod.cc
stub-cache.cc
token.cc
- top.cc
type-info.cc
unicode.cc
utils.cc
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index f0a6937..0298b0d 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -549,13 +549,21 @@
const int kDoubleRegsSize =
kDoubleSize * DwVfpRegister::kNumAllocatableRegisters;
- // Save all general purpose registers before messing with them.
- __ sub(sp, sp, Operand(kDoubleRegsSize));
- for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) {
- DwVfpRegister vfp_reg = DwVfpRegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
- __ vstr(vfp_reg, sp, offset);
+ // Save all VFP registers before messing with them.
+ DwVfpRegister first = DwVfpRegister::FromAllocationIndex(0);
+ DwVfpRegister last =
+ DwVfpRegister::FromAllocationIndex(
+ DwVfpRegister::kNumAllocatableRegisters - 1);
+ ASSERT(last.code() > first.code());
+ ASSERT((last.code() - first.code()) ==
+ (DwVfpRegister::kNumAllocatableRegisters - 1));
+#ifdef DEBUG
+ for (int i = 0; i <= (DwVfpRegister::kNumAllocatableRegisters - 1); i++) {
+ ASSERT((DwVfpRegister::FromAllocationIndex(i).code() <= last.code()) &&
+ (DwVfpRegister::FromAllocationIndex(i).code() >= first.code()));
}
+#endif
+ __ vstm(db_w, sp, first, last);
// Push all 16 registers (needed to populate FrameDescription::registers_).
__ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());
@@ -603,21 +611,30 @@
// Copy core registers into FrameDescription::registers_[kNumRegisters].
ASSERT(Register::kNumRegisters == kNumberOfRegisters);
- for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ ldr(r2, MemOperand(sp, i * kPointerSize));
- __ str(r2, MemOperand(r1, offset));
- }
+ ASSERT(kNumberOfRegisters % 2 == 0);
+
+ Label arm_loop;
+ __ add(r6, r1, Operand(FrameDescription::registers_offset()));
+ __ mov(r5, Operand(sp));
+ __ mov(r4, Operand(kNumberOfRegisters / 2));
+
+ __ bind(&arm_loop);
+ __ Ldrd(r2, r3, MemOperand(r5, kPointerSize * 2, PostIndex));
+ __ sub(r4, r4, Operand(1), SetCC);
+ __ Strd(r2, r3, MemOperand(r6, kPointerSize * 2, PostIndex));
+ __ b(gt, &arm_loop);
// Copy VFP registers to
// double_registers_[DoubleRegister::kNumAllocatableRegisters]
- int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
- __ vldr(d0, sp, src_offset);
- __ vstr(d0, r1, dst_offset);
- }
+ Label vfp_loop;
+ __ add(r6, r1, Operand(FrameDescription::double_registers_offset()));
+ __ mov(r4, Operand(DwVfpRegister::kNumAllocatableRegisters));
+
+ __ bind(&vfp_loop);
+ __ Ldrd(r2, r3, MemOperand(r5, kDoubleSize, PostIndex));
+ __ sub(r4, r4, Operand(1), SetCC);
+ __ Strd(r2, r3, MemOperand(r6, kDoubleSize, PostIndex));
+ __ b(gt, &vfp_loop);
// Remove the bailout id, eventually return address, and the saved registers
// from the stack.
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index a3775b5..d4bd81c 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -502,13 +502,16 @@
ASSERT(STRING_STARTS_WITH(format, "memop"));
if (instr->HasL()) {
Print("ldr");
- } else if ((instr->Bits(27, 25) == 0) && (instr->Bit(20) == 0)) {
- if (instr->Bits(7, 4) == 0xf) {
- Print("strd");
- } else {
- Print("ldrd");
- }
} else {
+ if ((instr->Bits(27, 25) == 0) && (instr->Bit(20) == 0) &&
+ (instr->Bits(7, 6) == 3) && (instr->Bit(4) == 1)) {
+ if (instr->Bit(5) == 1) {
+ Print("strd");
+ } else {
+ Print("ldrd");
+ }
+ return 5;
+ }
Print("str");
}
return 5;
@@ -1086,10 +1089,10 @@
}
} else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) {
// vabs
- Format(instr, "vabs'cond 'Dd, 'Dm");
+ Format(instr, "vabs.f64'cond 'Dd, 'Dm");
} else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
// vneg
- Format(instr, "vneg'cond 'Dd, 'Dm");
+ Format(instr, "vneg.f64'cond 'Dd, 'Dm");
} else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
} else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 5e2180a..af01c29 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -237,6 +237,13 @@
}
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_undetectable(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_instance_type(");
InputAt(0)->PrintTo(stream);
@@ -1081,6 +1088,12 @@
ASSERT(compare->value()->representation().IsTagged());
return new LIsSmiAndBranch(Use(compare->value()));
+ } else if (v->IsIsUndetectable()) {
+ HIsUndetectable* compare = HIsUndetectable::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+
+ return new LIsUndetectableAndBranch(UseRegisterAtStart(compare->value()),
+ TempRegister());
} else if (v->IsHasInstanceType()) {
HHasInstanceType* compare = HHasInstanceType::cast(v);
ASSERT(compare->value()->representation().IsTagged());
@@ -1107,6 +1120,10 @@
HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
UseRegisterAtStart(compare->right()));
+ } else if (v->IsCompareSymbolEq()) {
+ HCompareSymbolEq* compare = HCompareSymbolEq::cast(v);
+ return new LCmpSymbolEqAndBranch(UseRegisterAtStart(compare->left()),
+ UseRegisterAtStart(compare->right()));
} else if (v->IsInstanceOf()) {
HInstanceOf* instance_of = HInstanceOf::cast(v);
LInstruction* result =
@@ -1189,7 +1206,7 @@
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- return DefineAsRegister(new LContext);
+ return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
}
@@ -1507,6 +1524,15 @@
}
+LInstruction* LChunkBuilder::DoCompareSymbolEq(
+ HCompareSymbolEq* instr) {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LCmpSymbolEq* result = new LCmpSymbolEq(left, right);
+ return DefineAsRegister(result);
+}
+
+
LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -1531,6 +1557,14 @@
}
+LInstruction* LChunkBuilder::DoIsUndetectable(HIsUndetectable* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+
+ return DefineAsRegister(new LIsUndetectable(value));
+}
+
+
LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index e04fdd6..f98c68f 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -80,6 +80,8 @@
V(CmpJSObjectEq) \
V(CmpJSObjectEqAndBranch) \
V(CmpMapAndBranch) \
+ V(CmpSymbolEq) \
+ V(CmpSymbolEqAndBranch) \
V(CmpT) \
V(CmpTAndBranch) \
V(ConstantD) \
@@ -108,12 +110,16 @@
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
+ V(IsConstructCall) \
+ V(IsConstructCallAndBranch) \
V(IsNull) \
V(IsNullAndBranch) \
V(IsObject) \
V(IsObjectAndBranch) \
V(IsSmi) \
V(IsSmiAndBranch) \
+ V(IsUndetectable) \
+ V(IsUndetectableAndBranch) \
V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
@@ -165,8 +171,6 @@
V(Typeof) \
V(TypeofIs) \
V(TypeofIsAndBranch) \
- V(IsConstructCall) \
- V(IsConstructCallAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
V(ValueOf)
@@ -681,6 +685,28 @@
};
+class LCmpSymbolEq: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCmpSymbolEq(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpSymbolEq, "cmp-symbol-eq")
+};
+
+
+class LCmpSymbolEqAndBranch: public LControlInstruction<2, 0> {
+ public:
+ LCmpSymbolEqAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpSymbolEqAndBranch, "cmp-symbol-eq-and-branch")
+};
+
+
class LIsNull: public LTemplateInstruction<1, 1, 0> {
public:
explicit LIsNull(LOperand* value) {
@@ -754,6 +780,31 @@
};
+class LIsUndetectable: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LIsUndetectable(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectable, "is-undetectable")
+ DECLARE_HYDROGEN_ACCESSOR(IsUndetectable)
+};
+
+
+class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+ public:
+ explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+ "is-undetectable-and-branch")
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasInstanceType(LOperand* value) {
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 90f4f3d..33733fb 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -1620,6 +1620,28 @@
}
+void LCodeGen::DoCmpSymbolEq(LCmpSymbolEq* instr) {
+ Register left = ToRegister(instr->InputAt(0));
+ Register right = ToRegister(instr->InputAt(1));
+ Register result = ToRegister(instr->result());
+
+ __ cmp(left, Operand(right));
+ __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
+}
+
+
+void LCodeGen::DoCmpSymbolEqAndBranch(LCmpSymbolEqAndBranch* instr) {
+ Register left = ToRegister(instr->InputAt(0));
+ Register right = ToRegister(instr->InputAt(1));
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+ __ cmp(left, Operand(right));
+ EmitBranch(true_block, false_block, eq);
+}
+
+
void LCodeGen::DoIsNull(LIsNull* instr) {
Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
@@ -1774,6 +1796,40 @@
}
+void LCodeGen::DoIsUndetectable(LIsUndetectable* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+ Label false_label, done;
+ __ JumpIfSmi(input, &false_label);
+ __ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ ldrb(result, FieldMemOperand(result, Map::kBitFieldOffset));
+ __ tst(result, Operand(1 << Map::kIsUndetectable));
+ __ b(eq, &false_label);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+ __ jmp(&done);
+ __ bind(&false_label);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+ __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+ __ tst(temp, Operand(1 << Map::kIsUndetectable));
+ EmitBranch(true_block, false_block, ne);
+}
+
+
static InstanceType TestType(HHasInstanceType* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 889e981..034e22b 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1757,7 +1757,7 @@
{ MaybeObject* maybe_result = stub->TryGetCode();
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
+ Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond);
return result;
}
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 7a3c80f..89b87ac 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -3528,7 +3528,8 @@
__ Ret();
} else {
WriteInt32ToHeapNumberStub stub(value, r0, r3);
- __ TailCallStub(&stub);
+ MaybeObject* stub_code = masm()->TryTailCallStub(&stub);
+ if (stub_code->IsFailure()) return stub_code;
}
} else if (array_type == kExternalUnsignedIntArray) {
// The test is different for unsigned int values. Since we need
diff --git a/src/assembler.cc b/src/assembler.cc
index 7a10e8e..a75c94a 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -74,6 +74,18 @@
const char* RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
// -----------------------------------------------------------------------------
+// Implementation of AssemblerBase
+
+AssemblerBase::AssemblerBase(Isolate* isolate)
+ : isolate_(isolate),
+ jit_cookie_(0) {
+ if (FLAG_mask_constants_with_cookie && isolate != NULL) {
+ jit_cookie_ = V8::RandomPrivate(isolate);
+ }
+}
+
+
+// -----------------------------------------------------------------------------
// Implementation of Label
int Label::pos() const {
diff --git a/src/assembler.h b/src/assembler.h
index 1f103d4..7a786bc 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -49,12 +49,14 @@
class AssemblerBase: public Malloced {
public:
- explicit AssemblerBase(Isolate* isolate) : isolate_(isolate) {}
+ explicit AssemblerBase(Isolate* isolate);
Isolate* isolate() const { return isolate_; }
+ int jit_cookie() { return jit_cookie_; }
private:
Isolate* isolate_;
+ int jit_cookie_;
};
// -----------------------------------------------------------------------------
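
For intuition, a hedged sketch of the constant-blinding idea the cookie enables (illustrative only; these helpers are not V8's emitter code):

    // Mask attacker-influenced immediates at code-generation time so
    // predictable byte patterns never appear verbatim in JIT-ed memory.
    int32_t BlindImmediate(int32_t value, int32_t jit_cookie) {
      return value ^ jit_cookie;  // emit this blinded immediate...
    }
    // ...and have the emitted code unmask it: reg = blinded ^ jit_cookie.
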
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index eb7a4ce..b14fdb2 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -1638,10 +1638,12 @@
bool Genesis::InstallExperimentalNatives() {
- if (FLAG_harmony_proxies) {
- for (int i = ExperimentalNatives::GetDebuggerCount();
- i < ExperimentalNatives::GetBuiltinsCount();
- i++) {
+ for (int i = ExperimentalNatives::GetDebuggerCount();
+ i < ExperimentalNatives::GetBuiltinsCount();
+ i++) {
+ if (FLAG_harmony_proxies &&
+ strcmp(ExperimentalNatives::GetScriptName(i).start(),
+ "native proxy.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
}
@@ -1699,14 +1701,14 @@
F(16, global_context()->regexp_function())
-static FixedArray* CreateCache(int size, JSFunction* factory_function) {
+static FixedArray* CreateCache(int size, Handle<JSFunction> factory_function) {
Factory* factory = factory_function->GetIsolate()->factory();
// Caches are supposed to live for a long time, allocate in old space.
int array_size = JSFunctionResultCache::kEntriesIndex + 2 * size;
// Cannot use cast as object is not fully initialized yet.
JSFunctionResultCache* cache = reinterpret_cast<JSFunctionResultCache*>(
*factory->NewFixedArrayWithHoles(array_size, TENURED));
- cache->set(JSFunctionResultCache::kFactoryIndex, factory_function);
+ cache->set(JSFunctionResultCache::kFactoryIndex, *factory_function);
cache->MakeZeroSize();
return cache;
}
@@ -1723,9 +1725,9 @@
int index = 0;
-#define F(size, func) do { \
- FixedArray* cache = CreateCache((size), (func)); \
- caches->set(index++, cache); \
+#define F(size, func) do { \
+ FixedArray* cache = CreateCache((size), Handle<JSFunction>(func)); \
+ caches->set(index++, cache); \
} while (false)
JSFUNCTION_RESULT_CACHE_LIST(F);
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 05750f5..f16a8db 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -84,7 +84,8 @@
// List of code stubs only used on MIPS platforms.
#ifdef V8_TARGET_ARCH_MIPS
#define CODE_STUB_LIST_MIPS(V) \
- V(RegExpCEntry)
+ V(RegExpCEntry) \
+ V(DirectCEntry)
#else
#define CODE_STUB_LIST_MIPS(V)
#endif
diff --git a/src/compiler.h b/src/compiler.h
index c19ceff..1c02f68 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -30,7 +30,6 @@
#include "allocation.h"
#include "ast.h"
-#include "frame-element.h"
#include "zone.h"
namespace v8 {
diff --git a/src/factory.cc b/src/factory.cc
index 7dee66f..a8634ac 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -832,6 +832,15 @@
}
+Handle<JSProxy> Factory::NewJSProxy(Handle<Object> handler,
+ Handle<Object> prototype) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateJSProxy(*handler, *prototype),
+ JSProxy);
+}
+
+
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
Handle<String> name,
int number_of_literals,
diff --git a/src/factory.h b/src/factory.h
index 71bfdc4..b1d4682 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -231,6 +231,8 @@
Handle<FixedArray> elements,
PretenureFlag pretenure = NOT_TENURED);
+ Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
+
Handle<JSFunction> NewFunction(Handle<String> name,
Handle<Object> prototype);
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 3b5c087..aa1d274 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -147,7 +147,6 @@
DEFINE_bool(debug_code, false,
"generate extra code (assertions) for debugging")
DEFINE_bool(code_comments, false, "emit comments in code disassembly")
-DEFINE_bool(emit_branch_hints, false, "emit branch hints")
DEFINE_bool(peephole_optimization, true,
"perform peephole optimizations in assembly code")
DEFINE_bool(print_peephole_optimization, false,
@@ -328,7 +327,7 @@
DEFINE_int(sim_stack_alignment, 8,
"Stack alingment in bytes in simulator (4 or 8, 8 is default)")
-// top.cc
+// isolate.cc
DEFINE_bool(trace_exception, false,
"print stack trace when throwing exceptions")
DEFINE_bool(preallocate_message_memory, false,
diff --git a/src/frame-element.h b/src/frame-element.h
deleted file mode 100644
index d2e41eb..0000000
--- a/src/frame-element.h
+++ /dev/null
@@ -1,270 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_FRAME_ELEMENT_H_
-#define V8_FRAME_ELEMENT_H_
-
-#include "allocation.h"
-#include "type-info.h"
-#include "macro-assembler.h"
-#include "zone.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Virtual frame elements
-//
-// The internal elements of the virtual frames. There are several kinds of
-// elements:
-// * Invalid: elements that are uninitialized or not actually part
-// of the virtual frame. They should not be read.
-// * Memory: an element that resides in the actual frame. Its address is
-// given by its position in the virtual frame.
-// * Register: an element that resides in a register.
-// * Constant: an element whose value is known at compile time.
-
-class FrameElement BASE_EMBEDDED {
- public:
- enum SyncFlag {
- NOT_SYNCED,
- SYNCED
- };
-
- inline TypeInfo type_info() {
- // Copied elements do not have type info. Instead
- // we have to inspect their backing element in the frame.
- ASSERT(!is_copy());
- return TypeInfo::FromInt(TypeInfoField::decode(value_));
- }
-
- inline void set_type_info(TypeInfo info) {
- // Copied elements do not have type info. Instead
- // we have to inspect their backing element in the frame.
- ASSERT(!is_copy());
- value_ = value_ & ~TypeInfoField::mask();
- value_ = value_ | TypeInfoField::encode(info.ToInt());
- }
-
- // The default constructor creates an invalid frame element.
- FrameElement() {
- value_ = TypeField::encode(INVALID)
- | CopiedField::encode(false)
- | SyncedField::encode(false)
- | TypeInfoField::encode(TypeInfo::Uninitialized().ToInt())
- | DataField::encode(0);
- }
-
- // Factory function to construct an invalid frame element.
- static FrameElement InvalidElement() {
- FrameElement result;
- return result;
- }
-
- // Factory function to construct an in-memory frame element.
- static FrameElement MemoryElement(TypeInfo info) {
- FrameElement result(MEMORY, no_reg, SYNCED, info);
- return result;
- }
-
- // Factory function to construct an in-register frame element.
- static FrameElement RegisterElement(Register reg,
- SyncFlag is_synced,
- TypeInfo info) {
- return FrameElement(REGISTER, reg, is_synced, info);
- }
-
- // Factory function to construct a frame element whose value is known at
- // compile time.
- static FrameElement ConstantElement(Handle<Object> value,
- SyncFlag is_synced) {
- TypeInfo info = TypeInfo::TypeFromValue(value);
- FrameElement result(value, is_synced, info);
- return result;
- }
-
- static bool ConstantPoolOverflowed() {
- return !DataField::is_valid(
- Isolate::Current()->frame_element_constant_list()->length());
- }
-
- bool is_synced() const { return SyncedField::decode(value_); }
-
- void set_sync() {
- ASSERT(type() != MEMORY);
- value_ = value_ | SyncedField::encode(true);
- }
-
- void clear_sync() {
- ASSERT(type() != MEMORY);
- value_ = value_ & ~SyncedField::mask();
- }
-
- bool is_valid() const { return type() != INVALID; }
- bool is_memory() const { return type() == MEMORY; }
- bool is_register() const { return type() == REGISTER; }
- bool is_constant() const { return type() == CONSTANT; }
- bool is_copy() const { return type() == COPY; }
-
- bool is_copied() const { return CopiedField::decode(value_); }
- void set_copied() { value_ = value_ | CopiedField::encode(true); }
- void clear_copied() { value_ = value_ & ~CopiedField::mask(); }
-
- // An untagged int32 FrameElement represents a signed int32
- // on the stack. These are only allowed in a side-effect-free
- // int32 calculation, and if a non-int32 input shows up or an overflow
- // occurs, we bail out and drop all the int32 values.
- void set_untagged_int32(bool value) {
- value_ &= ~UntaggedInt32Field::mask();
- value_ |= UntaggedInt32Field::encode(value);
- }
- bool is_untagged_int32() const { return UntaggedInt32Field::decode(value_); }
-
- Register reg() const {
- ASSERT(is_register());
- uint32_t reg = DataField::decode(value_);
- Register result;
- result.code_ = reg;
- return result;
- }
-
- Handle<Object> handle() const {
- ASSERT(is_constant());
- return Isolate::Current()->frame_element_constant_list()->
- at(DataField::decode(value_));
- }
-
- int index() const {
- ASSERT(is_copy());
- return DataField::decode(value_);
- }
-
- bool Equals(FrameElement other) {
- uint32_t masked_difference = (value_ ^ other.value_) & ~CopiedField::mask();
- if (!masked_difference) {
- // The elements are equal if they agree exactly except on copied field.
- return true;
- } else {
- // If two constants have the same value, and agree otherwise, return true.
- return !(masked_difference & ~DataField::mask()) &&
- is_constant() &&
- handle().is_identical_to(other.handle());
- }
- }
-
- // Test if two FrameElements refer to the same memory or register location.
- bool SameLocation(FrameElement* other) {
- if (type() == other->type()) {
- if (value_ == other->value_) return true;
- if (is_constant() && handle().is_identical_to(other->handle())) {
- return true;
- }
- }
- return false;
- }
-
- // Given a pair of non-null frame element pointers, return one of them
- // as an entry frame candidate or null if they are incompatible.
- FrameElement* Combine(FrameElement* other) {
- // If either is invalid, the result is.
- if (!is_valid()) return this;
- if (!other->is_valid()) return other;
-
- if (!SameLocation(other)) return NULL;
- // If either is unsynced, the result is.
- FrameElement* result = is_synced() ? other : this;
- return result;
- }
-
- private:
- enum Type {
- INVALID,
- MEMORY,
- REGISTER,
- CONSTANT,
- COPY
- };
-
- // Used to construct memory and register elements.
- FrameElement(Type type,
- Register reg,
- SyncFlag is_synced,
- TypeInfo info) {
- value_ = TypeField::encode(type)
- | CopiedField::encode(false)
- | SyncedField::encode(is_synced != NOT_SYNCED)
- | TypeInfoField::encode(info.ToInt())
- | DataField::encode(reg.code_ > 0 ? reg.code_ : 0);
- }
-
- // Used to construct constant elements.
- FrameElement(Handle<Object> value, SyncFlag is_synced, TypeInfo info) {
- ZoneObjectList* constant_list =
- Isolate::Current()->frame_element_constant_list();
- value_ = TypeField::encode(CONSTANT)
- | CopiedField::encode(false)
- | SyncedField::encode(is_synced != NOT_SYNCED)
- | TypeInfoField::encode(info.ToInt())
- | DataField::encode(constant_list->length());
- constant_list->Add(value);
- }
-
- Type type() const { return TypeField::decode(value_); }
- void set_type(Type type) {
- value_ = value_ & ~TypeField::mask();
- value_ = value_ | TypeField::encode(type);
- }
-
- void set_index(int new_index) {
- ASSERT(is_copy());
- value_ = value_ & ~DataField::mask();
- value_ = value_ | DataField::encode(new_index);
- }
-
- void set_reg(Register new_reg) {
- ASSERT(is_register());
- value_ = value_ & ~DataField::mask();
- value_ = value_ | DataField::encode(new_reg.code_);
- }
-
- // Encode type, copied, synced and data in one 32 bit integer.
- uint32_t value_;
-
- // Declare BitFields with template parameters <type, start, size>.
- class TypeField: public BitField<Type, 0, 3> {};
- class CopiedField: public BitField<bool, 3, 1> {};
- class SyncedField: public BitField<bool, 4, 1> {};
- class UntaggedInt32Field: public BitField<bool, 5, 1> {};
- class TypeInfoField: public BitField<int, 6, 7> {};
- class DataField: public BitField<uint32_t, 13, 32 - 13> {};
-
- friend class VirtualFrame;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_FRAME_ELEMENT_H_
diff --git a/src/heap.cc b/src/heap.cc
index 975c504..a08e3a3 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -127,6 +127,7 @@
global_gc_prologue_callback_(NULL),
global_gc_epilogue_callback_(NULL),
gc_safe_size_of_old_object_(NULL),
+ total_regexp_code_generated_(0),
tracer_(NULL),
young_survivors_after_last_gc_(0),
high_survival_rate_period_length_(0),
@@ -2800,6 +2801,7 @@
code->set_check_type(RECEIVER_MAP_CHECK);
}
code->set_deoptimization_data(empty_fixed_array());
+ code->set_next_code_flushing_candidate(undefined_value());
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
if (!self_reference.is_null()) {
@@ -3212,6 +3214,27 @@
}
+MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
+ // Allocate map.
+ // TODO(rossberg): Once we optimize proxies, think about a scheme to share
+ // maps. Will probably depend on the identity of the handler object, too.
+ Object* map_obj;
+ MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
+ if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
+ Map* map = Map::cast(map_obj);
+ map->set_prototype(prototype);
+ map->set_pre_allocated_property_fields(1);
+ map->set_inobject_properties(1);
+
+ // Allocate the proxy object.
+ Object* result;
+ MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ JSProxy::cast(result)->set_handler(handler);
+ return result;
+}
+
+
MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
ASSERT(constructor->has_initial_map());
Map* map = constructor->initial_map();
diff --git a/src/heap.h b/src/heap.h
index 2febb5d..6c29a0a 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -452,6 +452,13 @@
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateFunctionPrototype(JSFunction* function);
+ // Allocates a Harmony Proxy.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ MUST_USE_RESULT MaybeObject* AllocateJSProxy(Object* handler,
+ Object* prototype);
+
// Reinitialize a JSGlobalProxy based on a constructor. The object
// must have the same size as objects allocated using the
// constructor. The object is reinitialized and behaves as an
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 5aa5dd0..ccc3c18 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -67,6 +67,16 @@
}
+void HValue::AssumeRepresentation(Representation r) {
+ if (CheckFlag(kFlexibleRepresentation)) {
+ ChangeRepresentation(r);
+ // The representation of the value is dictated by type feedback and
+ // will not be changed later.
+ ClearFlag(kFlexibleRepresentation);
+ }
+}
+
+
static int32_t ConvertAndSetOverflow(int64_t result, bool* overflow) {
if (result > kMaxInt) {
*overflow = true;
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 85a06fc..88ce87d 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -91,6 +91,7 @@
V(Compare) \
V(CompareJSObjectEq) \
V(CompareMap) \
+ V(CompareSymbolEq) \
V(Constant) \
V(Context) \
V(DeleteProperty) \
@@ -110,10 +111,11 @@
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
V(InvokeFunction) \
+ V(IsConstructCall) \
V(IsNull) \
V(IsObject) \
V(IsSmi) \
- V(IsConstructCall) \
+ V(IsUndetectable) \
V(JSArrayLength) \
V(LeaveInlined) \
V(LoadContextSlot) \
@@ -555,6 +557,7 @@
RepresentationChanged(r);
representation_ = r;
}
+ void AssumeRepresentation(Representation r);
virtual bool IsConvertibleToInteger() const { return true; }
@@ -860,6 +863,11 @@
DECLARE_CONCRETE_INSTRUCTION(Deoptimize)
+ enum UseEnvironment {
+ kNoUses,
+ kUseAll
+ };
+
protected:
virtual void InternalSetOperandAt(int index, HValue* value) {
values_[index] = value;
@@ -2409,6 +2417,40 @@
};
+class HCompareSymbolEq: public HBinaryOperation {
+ public:
+ HCompareSymbolEq(HValue* left, HValue* right, Token::Value op)
+ : HBinaryOperation(left, right), op_(op) {
+ ASSERT(op == Token::EQ || op == Token::EQ_STRICT);
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ SetFlag(kDependsOnMaps);
+ }
+
+ Token::Value op() const { return op_; }
+
+ virtual bool EmitAtUses() {
+ return !HasSideEffects() && !HasMultipleUses();
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
+ virtual HType CalculateInferredType() { return HType::Boolean(); }
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareSymbolEq);
+
+ protected:
+ virtual bool DataEquals(HValue* other) {
+ return op_ == HCompareSymbolEq::cast(other)->op_;
+ }
+
+ private:
+ const Token::Value op_;
+};
+
+
class HUnaryPredicate: public HUnaryOperation {
public:
explicit HUnaryPredicate(HValue* value) : HUnaryOperation(value) {
@@ -2469,6 +2511,17 @@
};
+class HIsUndetectable: public HUnaryPredicate {
+ public:
+ explicit HIsUndetectable(HValue* value) : HUnaryPredicate(value) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectable)
+
+ protected:
+ virtual bool DataEquals(HValue* other) { return true; }
+};
+
+
class HIsConstructCall: public HTemplateInstruction<0> {
public:
HIsConstructCall() {
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 923aaa9..18d77f3 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -115,12 +115,13 @@
}
-HDeoptimize* HBasicBlock::CreateDeoptimize() {
+HDeoptimize* HBasicBlock::CreateDeoptimize(
+ HDeoptimize::UseEnvironment has_uses) {
ASSERT(HasEnvironment());
+ if (has_uses == HDeoptimize::kNoUses) return new(zone()) HDeoptimize(0);
+
HEnvironment* environment = last_environment();
-
HDeoptimize* instr = new(zone()) HDeoptimize(environment->length());
-
for (int i = 0; i < environment->length(); i++) {
HValue* val = environment->values()->at(i);
instr->AddEnvironmentValue(val);
@@ -2490,7 +2491,9 @@
// Unconditionally deoptimize on the first non-smi compare.
clause->RecordTypeFeedback(oracle());
if (!clause->IsSmiCompare()) {
- current_block()->FinishExitWithDeoptimization();
+ // Finish with deoptimize and add uses of environment values to
+ // account for invisible uses.
+ current_block()->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
set_current_block(NULL);
break;
}
@@ -3237,7 +3240,7 @@
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
- current_block()->FinishExitWithDeoptimization();
+ current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
} else {
HInstruction* instr = BuildStoreNamedGeneric(object, name, value);
instr->set_position(expr->position());
@@ -3916,7 +3919,7 @@
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
- current_block()->FinishExitWithDeoptimization();
+ current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
} else {
HValue* context = environment()->LookupContext();
HCallNamed* call = new(zone()) HCallNamed(context, name, argument_count);
@@ -4630,6 +4633,10 @@
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* value = Pop();
HInstruction* instr = new(zone()) HMul(value, graph_->GetConstantMinus1());
+ TypeInfo info = oracle()->UnaryType(expr);
+ Representation rep = ToRepresentation(info);
+ TraceRepresentation(expr->op(), info, instr, rep);
+ instr->AssumeRepresentation(rep);
ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -4694,11 +4701,13 @@
? graph_->GetConstant1()
: graph_->GetConstantMinus1();
HInstruction* instr = new(zone()) HAdd(value, delta);
- Representation rep = ToRepresentation(oracle()->IncrementType(expr));
+ TypeInfo info = oracle()->IncrementType(expr);
+ Representation rep = ToRepresentation(info);
if (rep.IsTagged()) {
rep = Representation::Integer32();
}
- AssumeRepresentation(instr, rep);
+ TraceRepresentation(expr->op(), info, instr, rep);
+ instr->AssumeRepresentation(rep);
return instr;
}
@@ -4834,6 +4843,18 @@
}
+HCompareSymbolEq* HGraphBuilder::BuildSymbolCompare(HValue* left,
+ HValue* right,
+ Token::Value op) {
+ ASSERT(op == Token::EQ || op == Token::EQ_STRICT);
+ AddInstruction(new(zone()) HCheckNonSmi(left));
+ AddInstruction(HCheckInstanceType::NewIsSymbol(left));
+ AddInstruction(new(zone()) HCheckNonSmi(right));
+ AddInstruction(HCheckInstanceType::NewIsSymbol(right));
+ return new(zone()) HCompareSymbolEq(left, right, op);
+}
+
+
HStringCharCodeAt* HGraphBuilder::BuildStringCharCodeAt(HValue* string,
HValue* index) {
AddInstruction(new(zone()) HCheckNonSmi(string));
@@ -4858,15 +4879,13 @@
(right->IsConstant() && HConstant::cast(right)->HasStringValue()))) {
return instr;
}
- if (FLAG_trace_representation) {
- PrintF("Info: %s/%s\n", info.ToString(), ToRepresentation(info).Mnemonic());
- }
Representation rep = ToRepresentation(info);
// We only generate either int32 or generic tagged bitwise operations.
if (instr->IsBitwiseBinaryOperation() && rep.IsDouble()) {
rep = Representation::Integer32();
}
- AssumeRepresentation(instr, rep);
+ TraceRepresentation(expr->op(), info, instr, rep);
+ instr->AssumeRepresentation(rep);
return instr;
}
@@ -5034,21 +5053,23 @@
}
-void HGraphBuilder::AssumeRepresentation(HValue* value, Representation r) {
- if (value->CheckFlag(HValue::kFlexibleRepresentation)) {
- if (FLAG_trace_representation) {
- PrintF("Assume representation for %s to be %s (%d)\n",
- value->Mnemonic(),
- r.Mnemonic(),
- graph_->GetMaximumValueID());
- }
- value->ChangeRepresentation(r);
- // The representation of the value is dictated by type feedback and
- // will not be changed later.
- value->ClearFlag(HValue::kFlexibleRepresentation);
- } else if (FLAG_trace_representation) {
- PrintF("No representation assumed\n");
- }
+void HGraphBuilder::TraceRepresentation(Token::Value op,
+ TypeInfo info,
+ HValue* value,
+ Representation rep) {
+ if (!FLAG_trace_representation) return;
+ // TODO(svenpanne) Under which circumstances are we actually not flexible?
+ // At first glance, this looks a bit weird...
+ bool flexible = value->CheckFlag(HValue::kFlexibleRepresentation);
+ PrintF("Operation %s has type info %s, %schange representation assumption "
+ "for %s (ID %d) from %s to %s\n",
+ Token::Name(op),
+ info.ToString(),
+ flexible ? "" : " DO NOT ",
+ value->Mnemonic(),
+ graph_->GetMaximumValueID(),
+ value->representation().Mnemonic(),
+ rep.Mnemonic());
}
@@ -5153,6 +5174,9 @@
return Bailout("Unsupported non-primitive compare");
break;
}
+ } else if (type_info.IsString() && oracle()->IsSymbolCompare(expr) &&
+ (op == Token::EQ || op == Token::EQ_STRICT)) {
+ instr = BuildSymbolCompare(left, right, op);
} else {
HCompare* compare = new(zone()) HCompare(left, right, op);
Representation r = ToRepresentation(type_info);
@@ -5274,7 +5298,11 @@
void HGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
- return Bailout("inlined runtime function: IsUndetectableObject");
+ ASSERT(call->arguments()->length() == 1);
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* value = Pop();
+ ast_context()->ReturnInstruction(new(zone()) HIsUndetectable(value),
+ call->id());
}
diff --git a/src/hydrogen.h b/src/hydrogen.h
index e3ab2eb..8be1637 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -34,6 +34,7 @@
#include "ast.h"
#include "compiler.h"
#include "hydrogen-instructions.h"
+#include "type-info.h"
#include "zone.h"
namespace v8 {
@@ -125,8 +126,8 @@
void AddSimulate(int id) { AddInstruction(CreateSimulate(id)); }
void AssignCommonDominator(HBasicBlock* other);
- void FinishExitWithDeoptimization() {
- FinishExit(CreateDeoptimize());
+ void FinishExitWithDeoptimization(HDeoptimize::UseEnvironment has_uses) {
+ FinishExit(CreateDeoptimize(has_uses));
}
// Add the inlined function exit sequence, adding an HLeaveInlined
@@ -153,7 +154,7 @@
void AddDominatedBlock(HBasicBlock* block);
HSimulate* CreateSimulate(int id);
- HDeoptimize* CreateDeoptimize();
+ HDeoptimize* CreateDeoptimize(HDeoptimize::UseEnvironment has_uses);
int block_id_;
HGraph* graph_;
@@ -819,7 +820,11 @@
// to push them as outgoing parameters.
template <int V> HInstruction* PreProcessCall(HCall<V>* call);
- void AssumeRepresentation(HValue* value, Representation r);
+ void TraceRepresentation(Token::Value op,
+ TypeInfo info,
+ HValue* value,
+ Representation rep);
+ void AssumeRepresentation(HValue* value, Representation rep);
static Representation ToRepresentation(TypeInfo info);
void SetupScope(Scope* scope);
@@ -871,6 +876,9 @@
ZoneMapList* types,
Handle<String> name);
+ HCompareSymbolEq* BuildSymbolCompare(HValue* left,
+ HValue* right,
+ Token::Value op);
HStringCharCodeAt* BuildStringCharCodeAt(HValue* string,
HValue* index);
HInstruction* BuildBinaryOperation(BinaryOperation* expr,
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 74cbe46..a7602e7 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -1423,10 +1423,9 @@
}
-void Assembler::j(Condition cc, Label* L, Hint hint, Label::Distance distance) {
+void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
EnsureSpace ensure_space(this);
ASSERT(0 <= cc && cc < 16);
- if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
if (L->is_bound()) {
const int short_size = 2;
const int long_size = 6;
@@ -1456,10 +1455,9 @@
}
-void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode, Hint hint) {
+void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
ASSERT((0 <= cc) && (cc < 16));
- if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
// 0000 1111 1000 tttn #32-bit disp.
EMIT(0x0F);
EMIT(0x80 | cc);
@@ -1467,9 +1465,8 @@
}
-void Assembler::j(Condition cc, Handle<Code> code, Hint hint) {
+void Assembler::j(Condition cc, Handle<Code> code) {
EnsureSpace ensure_space(this);
- if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
// 0000 1111 1000 tttn #32-bit disp
EMIT(0x0F);
EMIT(0x80 | cc);
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index cd55da7..e933102 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -249,23 +249,6 @@
}
-enum Hint {
- no_hint = 0,
- not_taken = 0x2e,
- taken = 0x3e
-};
-
-
-// The result of negating a hint is as if the corresponding condition
-// were negated by NegateCondition. That is, no_hint is mapped to
-// itself and not_taken and taken are mapped to each other.
-inline Hint NegateHint(Hint hint) {
- return (hint == no_hint)
- ? no_hint
- : ((hint == not_taken) ? taken : not_taken);
-}
-
-
// -----------------------------------------------------------------------------
// Machine instruction Immediates
@@ -296,6 +279,7 @@
RelocInfo::Mode rmode_;
friend class Assembler;
+ friend class MacroAssembler;
};
@@ -863,13 +847,9 @@
// Conditional jumps
void j(Condition cc,
Label* L,
- Hint hint,
Label::Distance distance = Label::kFar);
- void j(Condition cc, Label* L, Label::Distance distance = Label::kFar) {
- j(cc, L, no_hint, distance);
- }
- void j(Condition cc, byte* entry, RelocInfo::Mode rmode, Hint hint = no_hint);
- void j(Condition cc, Handle<Code> code, Hint hint = no_hint);
+ void j(Condition cc, byte* entry, RelocInfo::Mode rmode);
+ void j(Condition cc, Handle<Code> code);
// Floating-point operations
void fld(int i);
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index a1682aa..44f0a97 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -356,12 +356,12 @@
// If the result is a smi, it is *not* an object in the ECMA sense.
__ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &use_receiver, not_taken);
+ __ j(zero, &use_receiver);
// If the type of the result (stored in its map) is less than
// FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
__ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
- __ j(above_equal, &exit, not_taken);
+ __ j(above_equal, &exit);
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
@@ -568,7 +568,7 @@
// 1. Make sure we have at least one argument.
{ Label done;
__ test(eax, Operand(eax));
- __ j(not_zero, &done, taken);
+ __ j(not_zero, &done);
__ pop(ebx);
__ push(Immediate(factory->undefined_value()));
__ push(ebx);
@@ -582,9 +582,9 @@
// 1 ~ return address.
__ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
__ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &non_function, not_taken);
+ __ j(zero, &non_function);
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &non_function, not_taken);
+ __ j(not_equal, &non_function);
// 3a. Patch the first argument if necessary when calling a function.
@@ -680,7 +680,7 @@
// 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
{ Label function;
__ test(edi, Operand(edi));
- __ j(not_zero, &function, taken);
+ __ j(not_zero, &function);
__ Set(ebx, Immediate(0));
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
__ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
@@ -729,7 +729,7 @@
__ shl(edx, kPointerSizeLog2 - kSmiTagSize);
// Check if the arguments will overflow the stack.
__ cmp(ecx, Operand(edx));
- __ j(greater, &okay, taken); // Signed comparison.
+ __ j(greater, &okay); // Signed comparison.
// Out of stack space.
__ push(Operand(ebp, 4 * kPointerSize)); // push this
@@ -1581,7 +1581,7 @@
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(masm->isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, taken, Label::kNear);
+ __ j(above_equal, &ok, Label::kNear);
StackCheckStub stub;
__ TailCallStub(&stub);
__ Abort("Unreachable code: returned from tail call.");
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index fa7d08c..6786d52 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -29,10 +29,10 @@
#if defined(V8_TARGET_ARCH_IA32)
-#include "code-stubs.h"
#include "bootstrapper.h"
-#include "jsregexp.h"
+#include "code-stubs.h"
#include "isolate.h"
+#include "jsregexp.h"
#include "regexp-macro-assembler.h"
namespace v8 {
@@ -331,14 +331,6 @@
// Takes the operands in edx and eax and loads them as integers in eax
// and ecx.
- static void LoadAsIntegers(MacroAssembler* masm,
- TypeInfo type_info,
- bool use_sse3,
- Label* operand_conversion_failure);
- static void LoadNumbersAsIntegers(MacroAssembler* masm,
- TypeInfo type_info,
- bool use_sse3,
- Label* operand_conversion_failure);
static void LoadUnknownsAsIntegers(MacroAssembler* masm,
bool use_sse3,
Label* operand_conversion_failure);
@@ -380,34 +372,24 @@
// trashed registers.
static void IntegerConvert(MacroAssembler* masm,
Register source,
- TypeInfo type_info,
bool use_sse3,
Label* conversion_failure) {
ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
Label done, right_exponent, normal_exponent;
Register scratch = ebx;
Register scratch2 = edi;
- if (type_info.IsInteger32() && CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope scope(SSE2);
- __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset));
- return;
- }
- if (!type_info.IsInteger32() || !use_sse3) {
- // Get exponent word.
- __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kExponentMask);
- }
+ // Get exponent word.
+ __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
+ // Get exponent alone in scratch2.
+ __ mov(scratch2, scratch);
+ __ and_(scratch2, HeapNumber::kExponentMask);
if (use_sse3) {
CpuFeatures::Scope scope(SSE3);
- if (!type_info.IsInteger32()) {
- // Check whether the exponent is too big for a 64 bit signed integer.
- static const uint32_t kTooBigExponent =
- (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
- __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
- __ j(greater_equal, conversion_failure);
- }
+ // Check whether the exponent is too big for a 64 bit signed integer.
+ static const uint32_t kTooBigExponent =
+ (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+ __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
+ __ j(greater_equal, conversion_failure);
// Load x87 register with heap number.
__ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
// Reserve space for 64 bit answer.
@@ -747,8 +729,7 @@
__ j(not_equal, slow);
// Convert the heap number in eax to an untagged integer in ecx.
- IntegerConvert(masm, eax, TypeInfo::Unknown(), CpuFeatures::IsSupported(SSE3),
- slow);
+ IntegerConvert(masm, eax, CpuFeatures::IsSupported(SSE3), slow);
// Do the bitwise operation and check if the result fits in a smi.
Label try_float;
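
For orientation on the IntegerConvert simplification above: with the TypeInfo parameter gone, the helper always performs the same job of untagging a heap number into the 32-bit integer that JS bitwise operators need, jumping to conversion_failure when the fast path cannot produce one. A portable sketch of the target semantics, ECMA-262 ToInt32 (my own illustration, not the stub's exponent-manipulation path; in the generated code, out-of-range inputs defer to the runtime rather than being handled inline):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // Truncate toward zero and wrap modulo 2^32, interpreting the low
    // 32 bits as a signed integer -- the value the bit-op stubs want.
    int32_t ToInt32(double d) {
      if (!std::isfinite(d)) return 0;   // NaN, +Infinity, -Infinity -> 0
      double t = std::trunc(d);          // drop the fractional part
      double m = std::fmod(t, 4294967296.0);
      if (m < 0) m += 4294967296.0;      // normalize into [0, 2^32)
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }

    int main() {
      printf("%d\n", ToInt32(-3.9));          // -3
      printf("%d\n", ToInt32(4294967298.0));  // 2: wrapped modulo 2^32
      return 0;
    }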
@@ -1018,7 +999,7 @@
// 3. Perform the smi check of the operands.
STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
__ test(combined, Immediate(kSmiTagMask));
- __ j(not_zero, &not_smis, not_taken);
+ __ j(not_zero, &not_smis);
// 4. Operands are both smis, perform the operation leaving the result in
// eax and check the result if necessary.
@@ -1047,7 +1028,7 @@
__ shl_cl(left);
// Check that the *signed* result fits in a smi.
__ cmp(left, 0xc0000000);
- __ j(sign, &use_fp_on_smis, not_taken);
+ __ j(sign, &use_fp_on_smis);
// Tag the result and store it in register eax.
__ SmiTag(left);
__ mov(eax, left);
@@ -1077,7 +1058,7 @@
// Smi tagging these two cases can only happen with shifts
// by 0 or 1 when handed a valid smi.
__ test(left, Immediate(0xc0000000));
- __ j(not_zero, slow, not_taken);
+ __ j(not_zero, slow);
// Tag the result and store it in register eax.
__ SmiTag(left);
__ mov(eax, left);
@@ -1086,12 +1067,12 @@
case Token::ADD:
ASSERT(right.is(eax));
__ add(right, Operand(left)); // Addition is commutative.
- __ j(overflow, &use_fp_on_smis, not_taken);
+ __ j(overflow, &use_fp_on_smis);
break;
case Token::SUB:
__ sub(left, Operand(right));
- __ j(overflow, &use_fp_on_smis, not_taken);
+ __ j(overflow, &use_fp_on_smis);
__ mov(eax, left);
break;
@@ -1105,7 +1086,7 @@
__ SmiUntag(right);
// Do multiplication.
__ imul(right, Operand(left)); // Multiplication is commutative.
- __ j(overflow, &use_fp_on_smis, not_taken);
+ __ j(overflow, &use_fp_on_smis);
// Check for negative zero result. Use combined = left | right.
__ NegativeZeroTest(right, combined, &use_fp_on_smis);
break;
@@ -1116,7 +1097,7 @@
__ mov(edi, left);
// Check for 0 divisor.
__ test(right, Operand(right));
- __ j(zero, &use_fp_on_smis, not_taken);
+ __ j(zero, &use_fp_on_smis);
// Sign extend left into edx:eax.
ASSERT(left.is(eax));
__ cdq();
@@ -1140,7 +1121,7 @@
case Token::MOD:
// Check for 0 divisor.
__ test(right, Operand(right));
- __ j(zero, &not_smis, not_taken);
+ __ j(zero, &not_smis);
// Sign extend left into edx:eax.
ASSERT(left.is(eax));
@@ -1541,7 +1522,7 @@
__ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1 * kPointerSize : 2 * kPointerSize));
__ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken, Label::kNear);
+ __ j(not_zero, &skip_allocation, Label::kNear);
// Fall through!
case NO_OVERWRITE:
__ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
@@ -1756,7 +1737,7 @@
__ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1 * kPointerSize : 2 * kPointerSize));
__ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken, Label::kNear);
+ __ j(not_zero, &skip_allocation, Label::kNear);
// Fall through!
case NO_OVERWRITE:
__ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
@@ -1956,7 +1937,7 @@
__ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1 * kPointerSize : 2 * kPointerSize));
__ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken, Label::kNear);
+ __ j(not_zero, &skip_allocation, Label::kNear);
// Fall through!
case NO_OVERWRITE:
__ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
@@ -2074,7 +2055,7 @@
// If the argument in edx is already an object, we skip the
// allocation of a heap number.
__ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
+ __ j(not_zero, &skip_allocation);
// Allocate a heap number for the result. Keep eax and edx intact
// for the possible runtime call.
__ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
@@ -2090,7 +2071,7 @@
// If the argument in eax is already an object, we skip the
// allocation of a heap number.
__ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
+ __ j(not_zero, &skip_allocation);
// Fall through!
case NO_OVERWRITE:
// Allocate a heap number for the result. Keep eax and edx intact
@@ -2333,11 +2314,11 @@
int supported_exponent_limit =
(63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
__ cmp(Operand(edi), Immediate(supported_exponent_limit));
- __ j(below, &in_range, taken, Label::kNear);
+ __ j(below, &in_range, Label::kNear);
// Check for infinity and NaN. Both return NaN for sin.
__ cmp(Operand(edi), Immediate(0x7ff00000));
Label non_nan_result;
- __ j(not_equal, &non_nan_result, taken, Label::kNear);
+ __ j(not_equal, &non_nan_result, Label::kNear);
// Input is +/-Infinity or NaN. Result is NaN.
__ fstp(0);
// NaN is represented by 0x7ff8000000000000.
@@ -2407,58 +2388,6 @@
// Input: edx, eax are the left and right objects of a bit op.
// Output: eax, ecx are left and right integers for a bit op.
-void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm,
- TypeInfo type_info,
- bool use_sse3,
- Label* conversion_failure) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- if (!type_info.IsDouble()) {
- if (!type_info.IsSmi()) {
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &arg1_is_object);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(edx);
- }
- __ SmiUntag(edx);
- __ jmp(&load_arg2);
- }
-
- __ bind(&arg1_is_object);
-
- // Get the untagged integer version of the edx heap number in ecx.
- IntegerConvert(masm, edx, type_info, use_sse3, conversion_failure);
- __ mov(edx, ecx);
-
- // Here edx has the untagged integer, eax has a Smi or a heap number.
- __ bind(&load_arg2);
- if (!type_info.IsDouble()) {
- // Test if arg2 is a Smi.
- if (!type_info.IsSmi()) {
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &arg2_is_object);
- } else {
- if (FLAG_debug_code) __ AbortIfNotSmi(eax);
- }
- __ SmiUntag(eax);
- __ mov(ecx, eax);
- __ jmp(&done);
- }
-
- __ bind(&arg2_is_object);
-
- // Get the untagged integer version of the eax heap number in ecx.
- IntegerConvert(masm, eax, type_info, use_sse3, conversion_failure);
- __ bind(&done);
- __ mov(eax, edx);
-}
-
-
-// Input: edx, eax are the left and right objects of a bit op.
-// Output: eax, ecx are left and right integers for a bit op.
void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
bool use_sse3,
Label* conversion_failure) {
@@ -2488,11 +2417,7 @@
__ j(not_equal, &check_undefined_arg1);
// Get the untagged integer version of the edx heap number in ecx.
- IntegerConvert(masm,
- edx,
- TypeInfo::Unknown(),
- use_sse3,
- conversion_failure);
+ IntegerConvert(masm, edx, use_sse3, conversion_failure);
__ mov(edx, ecx);
// Here edx has the untagged integer, eax has a Smi or a heap number.
@@ -2519,28 +2444,12 @@
__ j(not_equal, &check_undefined_arg2);
// Get the untagged integer version of the eax heap number in ecx.
- IntegerConvert(masm,
- eax,
- TypeInfo::Unknown(),
- use_sse3,
- conversion_failure);
+ IntegerConvert(masm, eax, use_sse3, conversion_failure);
__ bind(&done);
__ mov(eax, edx);
}
-void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
- TypeInfo type_info,
- bool use_sse3,
- Label* conversion_failure) {
- if (type_info.IsNumber()) {
- LoadNumbersAsIntegers(masm, type_info, use_sse3, conversion_failure);
- } else {
- LoadUnknownsAsIntegers(masm, use_sse3, conversion_failure);
- }
-}
-
-
void FloatingPointHelper::CheckLoadedIntegersWereInt32(MacroAssembler* masm,
bool use_sse3,
Label* not_int32) {
@@ -2553,7 +2462,7 @@
Label load_smi, done;
__ test(number, Immediate(kSmiTagMask));
- __ j(zero, &load_smi, not_taken, Label::kNear);
+ __ j(zero, &load_smi, Label::kNear);
__ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
__ jmp(&done, Label::kNear);
@@ -2572,14 +2481,14 @@
// Load operand in edx into xmm0.
__ test(edx, Immediate(kSmiTagMask));
// Argument in edx is a smi.
- __ j(zero, &load_smi_edx, not_taken, Label::kNear);
+ __ j(zero, &load_smi_edx, Label::kNear);
__ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
__ bind(&load_eax);
// Load operand in eax into xmm1.
__ test(eax, Immediate(kSmiTagMask));
// Argument in eax is a smi.
- __ j(zero, &load_smi_eax, not_taken, Label::kNear);
+ __ j(zero, &load_smi_eax, Label::kNear);
__ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ jmp(&done, Label::kNear);
@@ -2604,7 +2513,7 @@
// Load operand in edx into xmm0, or branch to not_numbers.
__ test(edx, Immediate(kSmiTagMask));
// Argument in edx is a smi.
- __ j(zero, &load_smi_edx, not_taken, Label::kNear);
+ __ j(zero, &load_smi_edx, Label::kNear);
Factory* factory = masm->isolate()->factory();
__ cmp(FieldOperand(edx, HeapObject::kMapOffset), factory->heap_number_map());
__ j(not_equal, not_numbers); // Argument in edx is not a number.
@@ -2613,7 +2522,7 @@
// Load operand in eax into xmm1, or branch to not_numbers.
__ test(eax, Immediate(kSmiTagMask));
// Argument in eax is a smi.
- __ j(zero, &load_smi_eax, not_taken, Label::kNear);
+ __ j(zero, &load_smi_eax, Label::kNear);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset), factory->heap_number_map());
__ j(equal, &load_float_eax, Label::kNear);
__ jmp(not_numbers); // Argument in eax is not a number.
@@ -2674,7 +2583,7 @@
__ mov(scratch, Operand(esp, 2 * kPointerSize));
}
__ test(scratch, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_1, not_taken, Label::kNear);
+ __ j(zero, &load_smi_1, Label::kNear);
__ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
__ bind(&done_load_1);
@@ -2684,7 +2593,7 @@
__ mov(scratch, Operand(esp, 1 * kPointerSize));
}
__ test(scratch, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_2, not_taken, Label::kNear);
+ __ j(zero, &load_smi_2, Label::kNear);
__ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
__ jmp(&done, Label::kNear);
@@ -2730,7 +2639,7 @@
// Test if both operands are floats or smi -> scratch=k_is_float;
// Otherwise scratch = k_not_float.
__ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &test_other, not_taken, Label::kNear); // argument in edx is OK
+ __ j(zero, &test_other, Label::kNear); // argument in edx is OK
__ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
Factory* factory = masm->isolate()->factory();
__ cmp(scratch, factory->heap_number_map());
@@ -2923,7 +2832,7 @@
// Check that the key is a smi.
Label slow;
__ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow, not_taken);
+ __ j(not_zero, &slow);
// Check if the calling frame is an arguments adaptor frame.
Label adaptor;
@@ -2936,7 +2845,7 @@
// through register eax. Use unsigned comparison to get negative
// check for free.
__ cmp(edx, Operand(eax));
- __ j(above_equal, &slow, not_taken);
+ __ j(above_equal, &slow);
// Read the argument from the stack and return it.
STATIC_ASSERT(kSmiTagSize == 1);
@@ -2952,7 +2861,7 @@
__ bind(&adaptor);
__ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ cmp(edx, Operand(ecx));
- __ j(above_equal, &slow, not_taken);
+ __ j(above_equal, &slow);
// Read the argument from the stack and return it.
STATIC_ASSERT(kSmiTagSize == 1);
@@ -3115,7 +3024,7 @@
ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
__ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
__ test(ebx, Operand(ebx));
- __ j(zero, &runtime, not_taken);
+ __ j(zero, &runtime);
// Check that the first argument is a JSRegExp object.
__ mov(eax, Operand(esp, kJSRegExpOffset));
@@ -3336,10 +3245,10 @@
// Check the result.
Label success;
__ cmp(eax, NativeRegExpMacroAssembler::SUCCESS);
- __ j(equal, &success, taken);
+ __ j(equal, &success);
Label failure;
__ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
- __ j(equal, &failure, taken);
+ __ j(equal, &failure);
__ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
// If it is not an exception, it can only be retry. Handle that in the runtime system.
__ j(not_equal, &runtime);
@@ -3658,7 +3567,7 @@
__ mov(ecx, Operand(edx));
__ or_(ecx, Operand(eax));
__ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &non_smi, not_taken);
+ __ j(not_zero, &non_smi);
__ sub(edx, Operand(eax)); // Return on the result of the subtraction.
__ j(no_overflow, &smi_done);
__ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
@@ -3833,7 +3742,7 @@
__ ucomisd(xmm0, xmm1);
// Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, not_taken);
+ __ j(parity_even, &unordered);
// Return a result of -1, 0, or 1, based on EFLAGS.
__ mov(eax, 0); // equal
__ mov(ecx, Immediate(Smi::FromInt(1)));
@@ -3849,12 +3758,12 @@
__ FCmp();
// Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, not_taken);
+ __ j(parity_even, &unordered);
Label below_label, above_label;
// Return a result of -1, 0, or 1, based on EFLAGS.
- __ j(below, &below_label, not_taken);
- __ j(above, &above_label, not_taken);
+ __ j(below, &below_label);
+ __ j(above, &above_label);
__ Set(eax, Immediate(0));
__ ret(0);
@@ -4012,7 +3921,7 @@
// Check if receiver is a smi (which is a number value).
__ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &receiver_is_value, not_taken);
+ __ j(zero, &receiver_is_value);
// Check if the receiver is a valid JS object.
__ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, edi);
@@ -4035,10 +3944,10 @@
// Check that the function really is a JavaScript function.
__ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
+ __ j(zero, &slow);
// Go to the slow case if we do not have a function.
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &slow, not_taken);
+ __ j(not_equal, &slow);
// Fast-case: Just invoke the function.
ParameterCount actual(argc_);
@@ -4132,7 +4041,7 @@
__ lea(ecx, Operand(eax, 1));
// Lower 2 bits of ecx are 0 iff eax has failure tag.
__ test(ecx, Immediate(kFailureTagMask));
- __ j(zero, &failure_returned, not_taken);
+ __ j(zero, &failure_returned);
ExternalReference pending_exception_address(
Isolate::k_pending_exception_address, masm->isolate());
@@ -4163,7 +4072,7 @@
// If the returned exception is RETRY_AFTER_GC, continue at the retry label.
STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
__ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
- __ j(zero, &retry, taken);
+ __ j(zero, &retry);
// Special handling of out of memory exceptions.
__ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
@@ -4417,7 +4326,7 @@
// Check that the left hand side is a JS object.
__ test(object, Immediate(kSmiTagMask));
- __ j(zero, &not_js_object, not_taken);
+ __ j(zero, &not_js_object);
__ IsObjectJSObjectType(object, map, scratch, &not_js_object);
// If there is a call site cache, don't look in the global cache, but do the
@@ -4445,7 +4354,7 @@
// Check that the function prototype is a JS object.
__ test(prototype, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
+ __ j(zero, &slow);
__ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
// Update the global instanceof or call site inlined cache with the current
@@ -4535,9 +4444,9 @@
// Before null, smi and string value checks, check that the rhs is a function
// as for a non-function rhs an exception needs to be thrown.
__ test(function, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
+ __ j(zero, &slow);
__ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
- __ j(not_equal, &slow, not_taken);
+ __ j(not_equal, &slow);
// Null is not an instance of anything.
__ cmp(object, factory->null_value());
@@ -4548,7 +4457,7 @@
__ bind(&object_not_null);
// Smi values are not instances of anything.
__ test(object, Immediate(kSmiTagMask));
- __ j(not_zero, &object_not_null_or_smi, not_taken);
+ __ j(not_zero, &object_not_null_or_smi);
__ Set(eax, Immediate(Smi::FromInt(1)));
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
@@ -4820,7 +4729,7 @@
__ test(code_,
Immediate(kSmiTagMask |
((~String::kMaxAsciiCharCode) << kSmiTagSize)));
- __ j(not_zero, &slow_case_, not_taken);
+ __ j(not_zero, &slow_case_);
Factory* factory = masm->isolate()->factory();
__ Set(result_, Immediate(factory->single_character_string_cache()));
@@ -4832,7 +4741,7 @@
code_, times_half_pointer_size,
FixedArray::kHeaderSize));
__ cmp(result_, factory->undefined_value());
- __ j(equal, &slow_case_, not_taken);
+ __ j(equal, &slow_case_);
__ bind(&exit_);
}
@@ -5781,7 +5690,7 @@
__ mov(ecx, Operand(edx));
__ or_(ecx, Operand(eax));
__ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &miss, not_taken, Label::kNear);
+ __ j(not_zero, &miss, Label::kNear);
if (GetCondition() == equal) {
// For equality we do not care about the sign of the result.
@@ -5811,12 +5720,12 @@
__ mov(ecx, Operand(edx));
__ and_(ecx, Operand(eax));
__ test(ecx, Immediate(kSmiTagMask));
- __ j(zero, &generic_stub, not_taken, Label::kNear);
+ __ j(zero, &generic_stub, Label::kNear);
__ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
- __ j(not_equal, &miss, not_taken, Label::kNear);
+ __ j(not_equal, &miss, Label::kNear);
__ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
- __ j(not_equal, &miss, not_taken, Label::kNear);
+ __ j(not_equal, &miss, Label::kNear);
// Inlining the double comparison and falling back to the general compare
// stub if NaN is involved or SSE2 or CMOV is unsupported.
@@ -5832,7 +5741,7 @@
__ ucomisd(xmm0, xmm1);
// Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, not_taken, Label::kNear);
+ __ j(parity_even, &unordered, Label::kNear);
// Return a result of -1, 0, or 1, based on EFLAGS.
// Performing mov, because xor would destroy the flag register.
@@ -5984,12 +5893,12 @@
__ mov(ecx, Operand(edx));
__ and_(ecx, Operand(eax));
__ test(ecx, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken, Label::kNear);
+ __ j(zero, &miss, Label::kNear);
__ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
- __ j(not_equal, &miss, not_taken, Label::kNear);
+ __ j(not_equal, &miss, Label::kNear);
__ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
- __ j(not_equal, &miss, not_taken, Label::kNear);
+ __ j(not_equal, &miss, Label::kNear);
ASSERT(GetCondition() == equal);
__ sub(eax, Operand(edx));
@@ -6069,17 +5978,17 @@
__ mov(entity_name, Operand(properties, index, times_half_pointer_size,
kElementsStartOffset - kHeapObjectTag));
__ cmp(entity_name, masm->isolate()->factory()->undefined_value());
- __ j(equal, done, taken);
+ __ j(equal, done);
// Stop if found the property.
__ cmp(entity_name, Handle<String>(name));
- __ j(equal, miss, not_taken);
+ __ j(equal, miss);
// Check if the entry name is not a symbol.
__ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
__ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
kIsSymbolMask);
- __ j(zero, miss, not_taken);
+ __ j(zero, miss);
}
StringDictionaryLookupStub stub(properties,
@@ -6136,7 +6045,7 @@
r0,
times_4,
kElementsStartOffset - kHeapObjectTag));
- __ j(equal, done, taken);
+ __ j(equal, done);
}
StringDictionaryLookupStub stub(elements,
diff --git a/src/ia32/frames-ia32.h b/src/ia32/frames-ia32.h
index 0f95abd..bc65ddf 100644
--- a/src/ia32/frames-ia32.h
+++ b/src/ia32/frames-ia32.h
@@ -80,8 +80,8 @@
class ExitFrameConstants : public AllStatic {
public:
- static const int kCodeOffset = -2 * kPointerSize;
- static const int kSPOffset = -1 * kPointerSize;
+ static const int kCodeOffset = -2 * kPointerSize;
+ static const int kSPOffset = -1 * kPointerSize;
static const int kCallerFPOffset = 0 * kPointerSize;
static const int kCallerPCOffset = +1 * kPointerSize;
@@ -94,7 +94,9 @@
class StandardFrameConstants : public AllStatic {
public:
- static const int kFixedFrameSize = 4;
+ // StandardFrame::IterateExpressions assumes that kContextOffset is the last
+ // object pointer.
+ static const int kFixedFrameSize = 4; // Currently unused.
static const int kExpressionsOffset = -3 * kPointerSize;
static const int kMarkerOffset = -2 * kPointerSize;
static const int kContextOffset = -1 * kPointerSize;
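
The comment added to StandardFrameConstants documents an ordering invariant rather than a behavioral change. A compile-time sketch of the layout relative to ebp, using the constant values visible above (kPointerSize of 4 assumed, as on ia32):

    #include <cstdio>

    constexpr int kPointerSize = 4;  // ia32

    // The three expression-stack-side offsets from StandardFrameConstants.
    constexpr int kExpressionsOffset = -3 * kPointerSize;
    constexpr int kMarkerOffset      = -2 * kPointerSize;
    constexpr int kContextOffset     = -1 * kPointerSize;

    // StandardFrame::IterateExpressions can scan one contiguous range that
    // ends at the context slot only because kContextOffset is the highest
    // (closest to ebp) of the three.
    static_assert(kExpressionsOffset < kMarkerOffset &&
                  kMarkerOffset < kContextOffset,
                  "context slot must be the last object pointer");

    int main() {
      printf("context at ebp%+d, expressions grow down from ebp%+d\n",
             kContextOffset, kExpressionsOffset);
      return 0;
    }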
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 3660a77..59c3373 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -201,7 +201,7 @@
__ lea(edx,
Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
__ push(edx);
- __ push(Immediate(Smi::FromInt(scope()->num_parameters())));
+ __ SafePush(Immediate(Smi::FromInt(scope()->num_parameters())));
// Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
@@ -245,7 +245,7 @@
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, taken, Label::kNear);
+ __ j(above_equal, &ok, Label::kNear);
StackCheckStub stub;
__ CallStub(&stub);
__ bind(&ok);
@@ -278,7 +278,7 @@
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, taken, Label::kNear);
+ __ j(above_equal, &ok, Label::kNear);
StackCheckStub stub;
__ CallStub(&stub);
// Record a mapping of this PC offset to the OSR id. This is used to find
@@ -390,13 +390,20 @@
void FullCodeGenerator::AccumulatorValueContext::Plug(
Handle<Object> lit) const {
- __ Set(result_register(), Immediate(lit));
+ if (lit->IsSmi()) {
+ __ SafeSet(result_register(), Immediate(lit));
+ } else {
+ __ Set(result_register(), Immediate(lit));
+ }
}
void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
- // Immediates can be pushed directly.
- __ push(Immediate(lit));
+ if (lit->IsSmi()) {
+ __ SafePush(Immediate(lit));
+ } else {
+ __ push(Immediate(lit));
+ }
}
@@ -739,7 +746,7 @@
}
ASSERT(prop->key()->AsLiteral() != NULL &&
prop->key()->AsLiteral()->handle()->IsSmi());
- __ Set(ecx, Immediate(prop->key()->AsLiteral()->handle()));
+ __ SafeSet(ecx, Immediate(prop->key()->AsLiteral()->handle()));
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
@@ -1193,7 +1200,7 @@
__ mov(edx,
ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
slow));
- __ mov(eax, Immediate(key_literal->handle()));
+ __ SafeSet(eax, Immediate(key_literal->handle()));
Handle<Code> ic =
isolate()->builtins()->KeyedLoadIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
@@ -1278,7 +1285,7 @@
ASSERT(key_literal->handle()->IsSmi());
// Load the key.
- __ mov(eax, Immediate(key_literal->handle()));
+ __ SafeSet(eax, Immediate(key_literal->handle()));
// Do a keyed property load.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
@@ -1549,7 +1556,7 @@
MemOperand slot_operand =
EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx);
__ push(slot_operand);
- __ mov(eax, Immediate(property->key()->AsLiteral()->handle()));
+ __ SafeSet(eax, Immediate(property->key()->AsLiteral()->handle()));
} else {
VisitForStackValue(property->obj());
VisitForAccumulatorValue(property->key());
@@ -1562,7 +1569,7 @@
MemOperand slot_operand =
EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx);
__ push(slot_operand);
- __ push(Immediate(property->key()->AsLiteral()->handle()));
+ __ SafePush(Immediate(property->key()->AsLiteral()->handle()));
} else {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
@@ -1641,6 +1648,7 @@
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
+ ASSERT(!key->handle()->IsSmi());
__ mov(ecx, Immediate(key->handle()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
@@ -1725,7 +1733,7 @@
__ imul(eax, Operand(ecx));
__ j(overflow, &stub_call);
__ test(eax, Operand(eax));
- __ j(not_zero, &done, taken, Label::kNear);
+ __ j(not_zero, &done, Label::kNear);
__ mov(ebx, edx);
__ or_(ebx, Operand(ecx));
__ j(negative, &stub_call);
@@ -1807,7 +1815,7 @@
EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
}
__ mov(edx, eax);
- __ Set(ecx, Immediate(prop->key()->AsLiteral()->handle()));
+ __ SafeSet(ecx, Immediate(prop->key()->AsLiteral()->handle()));
} else {
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
@@ -2320,7 +2328,7 @@
SetSourcePosition(expr->position());
// Load function and argument count into edi and eax.
- __ Set(eax, Immediate(arg_count));
+ __ SafeSet(eax, Immediate(arg_count));
__ mov(edi, Operand(esp, arg_count * kPointerSize));
Handle<Code> construct_builtin =
@@ -2660,7 +2668,7 @@
// parameter count in eax.
VisitForAccumulatorValue(args->at(0));
__ mov(edx, eax);
- __ mov(eax, Immediate(Smi::FromInt(scope()->num_parameters())));
+ __ SafeSet(eax, Immediate(Smi::FromInt(scope()->num_parameters())));
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(eax);
@@ -2672,7 +2680,7 @@
Label exit;
// Get the number of formal parameters.
- __ Set(eax, Immediate(Smi::FromInt(scope()->num_parameters())));
+ __ SafeSet(eax, Immediate(Smi::FromInt(scope()->num_parameters())));
// Check if the calling frame is an arguments adaptor frame.
__ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -3813,7 +3821,7 @@
MemOperand slot_operand =
EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx);
__ push(slot_operand);
- __ mov(eax, Immediate(prop->key()->AsLiteral()->handle()));
+ __ SafeSet(eax, Immediate(prop->key()->AsLiteral()->handle()));
} else {
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
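
The Set/push to SafeSet/SafePush switch for smi-valued literals above is a JIT-spraying mitigation: a literal an attacker can choose (say, a smi constant in their script) should never appear verbatim as an immediate in executable memory, because the bytes of that immediate could be crafted to decode as a useful instruction sequence. The MacroAssembler implementation appears later in this patch; here is a standalone sketch of the blinding idea on plain integers (cookie value hypothetical; the 17-bit width check mirrors the new IsUnsafeImmediate):

    #include <cstdint>
    #include <cstdio>

    constexpr int kMaxImmediateBits = 17;

    // Only blind immediates wide enough to encode a useful payload.
    bool IsUnsafeImmediate(int32_t x) {
      int32_t limit = 1 << (kMaxImmediateBits - 1);
      return !(-limit <= x && x < limit);   // mirrors !is_intn(x, 17)
    }

    // What SafeSet arranges at run time: the code stream only ever holds
    // value ^ cookie, and a second XOR instruction recovers the value.
    int32_t MaterializeBlinded(int32_t value, int32_t cookie) {
      int32_t reg = value ^ cookie;  // this literal is what gets baked in
      reg ^= cookie;                 // emitted as a separate xor insn
      return reg;
    }

    int main() {
      const int32_t cookie = 0x5A5A5A5A;  // hypothetical jit cookie
      int32_t v = 0x41414141;             // attacker-chosen smi payload
      printf("unsafe=%d restored=%d\n",
             IsUnsafeImmediate(v), MaterializeBlinded(v, cookie) == v);
      return 0;                           // prints: unsafe=1 restored=1
    }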
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 6db6d30..fa41349 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -50,11 +50,11 @@
// Register usage:
// type: holds the receiver instance type on entry.
__ cmp(type, JS_GLOBAL_OBJECT_TYPE);
- __ j(equal, global_object, not_taken);
+ __ j(equal, global_object);
__ cmp(type, JS_BUILTINS_OBJECT_TYPE);
- __ j(equal, global_object, not_taken);
+ __ j(equal, global_object);
__ cmp(type, JS_GLOBAL_PROXY_TYPE);
- __ j(equal, global_object, not_taken);
+ __ j(equal, global_object);
}
@@ -73,13 +73,13 @@
// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
+ __ j(zero, miss);
// Check that the receiver is a valid JS object.
__ mov(r1, FieldOperand(receiver, HeapObject::kMapOffset));
__ movzx_b(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
__ cmp(r0, FIRST_JS_OBJECT_TYPE);
- __ j(below, miss, not_taken);
+ __ j(below, miss);
// If this assert fails, we have to check upper bound too.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
@@ -90,7 +90,7 @@
__ test_b(FieldOperand(r1, Map::kBitFieldOffset),
(1 << Map::kIsAccessCheckNeeded) |
(1 << Map::kHasNamedInterceptor));
- __ j(not_zero, miss, not_taken);
+ __ j(not_zero, miss);
__ mov(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
__ CheckMap(r0, FACTORY->hash_table_map(), miss, true);
@@ -146,7 +146,7 @@
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
__ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
- __ j(not_zero, miss_label, not_taken);
+ __ j(not_zero, miss_label);
// Get the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
@@ -204,7 +204,7 @@
PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
__ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
Immediate(kTypeAndReadOnlyMask));
- __ j(not_zero, miss_label, not_taken);
+ __ j(not_zero, miss_label);
// Store the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
@@ -294,9 +294,9 @@
times_pointer_size,
NumberDictionary::kElementsStartOffset));
if (i != (kProbes - 1)) {
- __ j(equal, &done, taken);
+ __ j(equal, &done);
} else {
- __ j(not_equal, miss, not_taken);
+ __ j(not_equal, miss);
}
}
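
Context for the probe sequence this hunk tweaks: the surrounding loop unrolls kProbes lookups into the NumberDictionary, falling through to the next probe on a mismatch and only jumping to miss after the last one. A plain-C++ sketch of the probing scheme (quadratic offsets over a power-of-two table, in the spirit of V8's hash tables; the function and table here are my own illustration):

    #include <cstdint>
    #include <cstdio>

    constexpr int kProbes = 4;  // bounded inline probe count, as above

    // Open-addressed lookup with quadratic probing over a table whose
    // capacity is a power of two (capacity_mask = capacity - 1).
    int FindEntry(const uint32_t* keys, uint32_t capacity_mask,
                  uint32_t hash, uint32_t key) {
      for (int i = 0; i < kProbes; ++i) {
        uint32_t index = (hash + i * (i + 1) / 2) & capacity_mask;
        if (keys[index] == key) return static_cast<int>(index);
      }
      return -1;  // the generated code branches to the miss label here
    }

    int main() {
      uint32_t keys[8] = {0, 0, 0, 0, 0, 0, 0, 7};
      printf("%d %d\n",
             FindEntry(keys, 7, 7, 7),   // 7: hit on the first probe
             FindEntry(keys, 7, 3, 3));  // -1: miss after kProbes tries
      return 0;
    }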
@@ -374,7 +374,7 @@
// Check that the object isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, slow, not_taken);
+ __ j(zero, slow);
// Get the map of the receiver.
__ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
@@ -382,7 +382,7 @@
// Check bit field.
__ test_b(FieldOperand(map, Map::kBitFieldOffset),
(1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit));
- __ j(not_zero, slow, not_taken);
+ __ j(not_zero, slow);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing
@@ -390,7 +390,7 @@
ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
__ CmpInstanceType(map, JS_OBJECT_TYPE);
- __ j(below, slow, not_taken);
+ __ j(below, slow);
}
@@ -453,12 +453,12 @@
// Is the string an array index, with cached numeric value?
__ mov(hash, FieldOperand(key, String::kHashFieldOffset));
__ test(hash, Immediate(String::kContainsCachedArrayIndexMask));
- __ j(zero, index_string, not_taken);
+ __ j(zero, index_string);
// Is the string a symbol?
ASSERT(kSymbolTag != 0);
__ test_b(FieldOperand(map, Map::kInstanceTypeOffset), kIsSymbolMask);
- __ j(zero, not_symbol, not_taken);
+ __ j(zero, not_symbol);
}
@@ -473,7 +473,7 @@
// Check that the key is a smi.
__ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &check_string, not_taken);
+ __ j(not_zero, &check_string);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from
// where a numeric string is converted to a smi.
@@ -485,7 +485,7 @@
// now in ecx.
__ test_b(FieldOperand(ecx, Map::kBitField2Offset),
1 << Map::kHasFastElements);
- __ j(zero, &check_number_dictionary, not_taken);
+ __ j(zero, &check_number_dictionary);
GenerateFastArrayLoad(masm,
edx,
@@ -663,11 +663,11 @@
// Check that the receiver isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
+ __ j(zero, &slow);
// Check that the key is an array index, that is Uint32.
__ test(eax, Immediate(kSmiTagMask | kSmiSignMask));
- __ j(not_zero, &slow, not_taken);
+ __ j(not_zero, &slow);
// Get the map of the receiver.
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
@@ -677,7 +677,7 @@
__ movzx_b(ecx, FieldOperand(ecx, Map::kBitFieldOffset));
__ and_(Operand(ecx), Immediate(kSlowCaseBitFieldMask));
__ cmp(Operand(ecx), Immediate(1 << Map::kHasIndexedInterceptor));
- __ j(not_zero, &slow, not_taken);
+ __ j(not_zero, &slow);
// Everything is fine, call runtime.
__ pop(ecx);
@@ -708,22 +708,22 @@
// Check that the object isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
+ __ j(zero, &slow);
// Get the map from the receiver.
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
__ test_b(FieldOperand(edi, Map::kBitFieldOffset),
1 << Map::kIsAccessCheckNeeded);
- __ j(not_zero, &slow, not_taken);
+ __ j(not_zero, &slow);
// Check that the key is a smi.
__ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow, not_taken);
+ __ j(not_zero, &slow);
__ CmpInstanceType(edi, JS_ARRAY_TYPE);
__ j(equal, &array);
// Check that the object is some kind of JS object.
__ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
- __ j(below, &slow, not_taken);
+ __ j(below, &slow);
// Object case: Check key against length in the elements array.
// eax: value
@@ -733,7 +733,7 @@
// Check that the object is in fast mode and writable.
__ CheckMap(edi, FACTORY->fixed_array_map(), &slow, true);
__ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
- __ j(below, &fast, taken);
+ __ j(below, &fast);
// Slow case: call runtime.
__ bind(&slow);
@@ -749,9 +749,9 @@
// edi: receiver->elements, a FixedArray
// flags: compare (ecx, edx.length())
// do not leave holes in the array:
- __ j(not_equal, &slow, not_taken);
+ __ j(not_equal, &slow);
__ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
- __ j(above_equal, &slow, not_taken);
+ __ j(above_equal, &slow);
// Add 1 to receiver->length, and go to fast array write.
__ add(FieldOperand(edx, JSArray::kLengthOffset),
Immediate(Smi::FromInt(1)));
@@ -770,7 +770,7 @@
// Check the key against the length in the array, compute the
// address to store into and fall through to fast case.
__ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // Compare smis.
- __ j(above_equal, &extra, not_taken);
+ __ j(above_equal, &extra);
// Fast case: Do the store.
__ bind(&fast);
@@ -814,9 +814,9 @@
//
// Check for number.
__ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &number, not_taken);
+ __ j(zero, &number);
__ CmpObjectType(edx, HEAP_NUMBER_TYPE, ebx);
- __ j(not_equal, &non_number, taken);
+ __ j(not_equal, &non_number);
__ bind(&number);
StubCompiler::GenerateLoadGlobalFunctionPrototype(
masm, Context::NUMBER_FUNCTION_INDEX, edx);
@@ -825,7 +825,7 @@
// Check for string.
__ bind(&non_number);
__ CmpInstanceType(ebx, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &non_string, taken);
+ __ j(above_equal, &non_string);
StubCompiler::GenerateLoadGlobalFunctionPrototype(
masm, Context::STRING_FUNCTION_INDEX, edx);
__ jmp(&probe);
@@ -833,9 +833,9 @@
// Check for boolean.
__ bind(&non_string);
__ cmp(edx, FACTORY->true_value());
- __ j(equal, &boolean, not_taken);
+ __ j(equal, &boolean);
__ cmp(edx, FACTORY->false_value());
- __ j(not_equal, &miss, taken);
+ __ j(not_equal, &miss);
__ bind(&boolean);
StubCompiler::GenerateLoadGlobalFunctionPrototype(
masm, Context::BOOLEAN_FUNCTION_INDEX, edx);
@@ -862,11 +862,11 @@
// Check that the result is not a smi.
__ test(edi, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
+ __ j(zero, miss);
// Check that the value is a JavaScript function, fetching its map into eax.
__ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
- __ j(not_equal, miss, not_taken);
+ __ j(not_equal, miss);
// Invoke the function.
ParameterCount actual(argc);
@@ -942,7 +942,7 @@
Label invoke, global;
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); // receiver
__ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &invoke, not_taken, Label::kNear);
+ __ j(zero, &invoke, Label::kNear);
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
__ cmp(ebx, JS_GLOBAL_OBJECT_TYPE);
@@ -1024,7 +1024,7 @@
// Check that the key is a smi.
__ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &check_string, not_taken);
+ __ j(not_zero, &check_string);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from
@@ -1314,22 +1314,22 @@
// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
+ __ j(zero, &miss);
// Check that the object is a JS array.
__ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
// Check that elements are FixedArray.
// We rely on StoreIC_ArrayLength below to deal with all types of
// fast elements (including COW).
__ mov(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
__ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
// Check that value is a smi.
__ test(value, Immediate(kSmiTagMask));
- __ j(not_zero, &miss, not_taken);
+ __ j(not_zero, &miss);
// Prepare tail call to StoreIC_ArrayLength.
__ pop(scratch);
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 9163496..1f2cd4c 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -572,7 +572,7 @@
__ jmp(entry, RelocInfo::RUNTIME_ENTRY);
__ bind(&done);
} else {
- __ j(cc, entry, RelocInfo::RUNTIME_ENTRY, not_taken);
+ __ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
}
}
}
@@ -1482,7 +1482,7 @@
// Don't base result on EFLAGS when a NaN is involved. Instead
// jump to the unordered case, which produces a false value.
__ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- __ j(parity_even, &unordered, not_taken, Label::kNear);
+ __ j(parity_even, &unordered, Label::kNear);
} else {
EmitCmpI(left, right);
}
@@ -1543,6 +1543,31 @@
}
+void LCodeGen::DoCmpSymbolEq(LCmpSymbolEq* instr) {
+ Register left = ToRegister(instr->InputAt(0));
+ Register right = ToRegister(instr->InputAt(1));
+ Register result = ToRegister(instr->result());
+
+ Label done;
+ __ cmp(left, Operand(right));
+ __ mov(result, factory()->false_value());
+ __ j(not_equal, &done, Label::kNear);
+ __ mov(result, factory()->true_value());
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoCmpSymbolEqAndBranch(LCmpSymbolEqAndBranch* instr) {
+ Register left = ToRegister(instr->InputAt(0));
+ Register right = ToRegister(instr->InputAt(1));
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+ __ cmp(left, Operand(right));
+ EmitBranch(true_block, false_block, equal);
+}
+
+
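
DoCmpSymbolEq above can answer symbol equality with a single pointer cmp and no runtime call because symbols are interned: each distinct symbol string has exactly one canonical object, so reference equality coincides with value equality. A minimal illustration of that property with a hand-rolled interning table (nothing here is V8 API):

    #include <cstdio>
    #include <string>
    #include <unordered_set>

    // Return the canonical storage for a value, creating it on first use.
    const std::string* Intern(std::unordered_set<std::string>& table,
                              const std::string& s) {
      return &*table.insert(s).first;
    }

    int main() {
      std::unordered_set<std::string> symbols;
      const std::string* a = Intern(symbols, "foo");
      const std::string* b = Intern(symbols, "foo");
      const std::string* c = Intern(symbols, "bar");
      // Identity compare answers value equality -- exactly what the
      // single cmp(left, right) in DoCmpSymbolEq relies on.
      printf("%d %d\n", a == b, a != c);  // 1 1
      return 0;
    }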
void LCodeGen::DoIsNull(LIsNull* instr) {
Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
@@ -1702,6 +1727,44 @@
}
+void LCodeGen::DoIsUndetectable(LIsUndetectable* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+ Label false_label, done;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(input, Immediate(kSmiTagMask));
+ __ j(zero, &false_label, Label::kNear);
+ __ mov(result, FieldOperand(input, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(result, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(zero, &false_label, Label::kNear);
+ __ mov(result, factory()->true_value());
+ __ jmp(&done);
+ __ bind(&false_label);
+ __ mov(result, factory()->false_value());
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(input, Immediate(kSmiTagMask));
+ __ j(zero, chunk_->GetAssemblyLabel(false_block));
+ __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ EmitBranch(true_block, false_block, not_zero);
+}
+
+
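
Both DoIsUndetectable variants above reduce to testing one bit in the receiver's map, since undetectability (the mechanism that lets an object such as document.all masquerade as undefined) is a per-map property. A toy version of the bit test (bit position hypothetical; the real constant is Map::kIsUndetectable):

    #include <cstdio>

    // Hypothetical bit position; V8 names the real one Map::kIsUndetectable.
    constexpr unsigned kIsUndetectableBit = 4;

    struct MapSketch { unsigned char bit_field; };

    bool IsUndetectable(const MapSketch& map) {
      // Same shape as the generated test_b(FieldOperand(...), 1 << bit).
      return (map.bit_field & (1u << kIsUndetectableBit)) != 0;
    }

    int main() {
      MapSketch ordinary{0};
      MapSketch undetectable{1u << kIsUndetectableBit};
      printf("%d %d\n", IsUndetectable(ordinary),
             IsUndetectable(undetectable));  // 0 1
      return 0;
    }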
static InstanceType TestType(HHasInstanceType* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
@@ -1968,7 +2031,7 @@
// A Smi is not an instance of anything.
__ test(object, Immediate(kSmiTagMask));
- __ j(zero, &false_result, not_taken);
+ __ j(zero, &false_result);
// This is the inlined call site instanceof cache. The two occurrences of the
// hole value will be patched to the last map/result pair generated by the
@@ -1978,7 +2041,7 @@
__ mov(map, FieldOperand(object, HeapObject::kMapOffset));
__ bind(deferred->map_check()); // Label for calculating code patching.
__ cmp(map, factory()->the_hole_value()); // Patched to cached map.
- __ j(not_equal, &cache_miss, not_taken, Label::kNear);
+ __ j(not_equal, &cache_miss, Label::kNear);
__ mov(eax, factory()->the_hole_value()); // Patched to either true or false.
__ jmp(&done);
@@ -3559,7 +3622,7 @@
// Smi check.
__ test(input_reg, Immediate(kSmiTagMask));
- __ j(zero, &load_smi, not_taken, Label::kNear);
+ __ j(zero, &load_smi, Label::kNear);
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 12a951b..4acbf72 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -239,6 +239,13 @@
}
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_undetectable(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_instance_type(");
InputAt(0)->PrintTo(stream);
@@ -1080,6 +1087,12 @@
ASSERT(compare->value()->representation().IsTagged());
return new LIsSmiAndBranch(Use(compare->value()));
+ } else if (v->IsIsUndetectable()) {
+ HIsUndetectable* compare = HIsUndetectable::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+
+ return new LIsUndetectableAndBranch(UseRegisterAtStart(compare->value()),
+ TempRegister());
} else if (v->IsHasInstanceType()) {
HHasInstanceType* compare = HHasInstanceType::cast(v);
ASSERT(compare->value()->representation().IsTagged());
@@ -1113,6 +1126,10 @@
HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
UseRegisterAtStart(compare->right()));
+ } else if (v->IsCompareSymbolEq()) {
+ HCompareSymbolEq* compare = HCompareSymbolEq::cast(v);
+ return new LCmpSymbolEqAndBranch(UseRegisterAtStart(compare->left()),
+ UseRegisterAtStart(compare->right()));
} else if (v->IsInstanceOf()) {
HInstanceOf* instance_of = HInstanceOf::cast(v);
LOperand* left = UseFixed(instance_of->left(), InstanceofStub::left());
@@ -1201,7 +1218,7 @@
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- return DefineAsRegister(new LContext);
+ return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
}
@@ -1540,6 +1557,15 @@
}
+LInstruction* LChunkBuilder::DoCompareSymbolEq(
+ HCompareSymbolEq* instr) {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LCmpSymbolEq* result = new LCmpSymbolEq(left, right);
+ return DefineAsRegister(result);
+}
+
+
LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -1564,6 +1590,14 @@
}
+LInstruction* LChunkBuilder::DoIsUndetectable(HIsUndetectable* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+
+ return DefineAsRegister(new LIsUndetectable(value));
+}
+
+
LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 8a93bda..1ed3eaa 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -73,6 +73,8 @@
V(CmpIDAndBranch) \
V(CmpJSObjectEq) \
V(CmpJSObjectEqAndBranch) \
+ V(CmpSymbolEq) \
+ V(CmpSymbolEqAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
V(CmpTAndBranch) \
@@ -102,14 +104,16 @@
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
+ V(IsConstructCall) \
+ V(IsConstructCallAndBranch) \
V(IsNull) \
V(IsNullAndBranch) \
V(IsObject) \
V(IsObjectAndBranch) \
V(IsSmi) \
V(IsSmiAndBranch) \
- V(IsConstructCall) \
- V(IsConstructCallAndBranch) \
+ V(IsUndetectable) \
+ V(IsUndetectableAndBranch) \
V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
@@ -666,6 +670,28 @@
};
+class LCmpSymbolEq: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCmpSymbolEq(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpSymbolEq, "cmp-symbol-eq")
+};
+
+
+class LCmpSymbolEqAndBranch: public LControlInstruction<2, 0> {
+ public:
+ LCmpSymbolEqAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpSymbolEqAndBranch, "cmp-symbol-eq-and-branch")
+};
+
+
class LIsNull: public LTemplateInstruction<1, 1, 0> {
public:
explicit LIsNull(LOperand* value) {
@@ -743,6 +769,31 @@
};
+class LIsUndetectable: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LIsUndetectable(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectable, "is-undetectable")
+ DECLARE_HYDROGEN_ACCESSOR(IsUndetectable)
+};
+
+
+class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+ public:
+ explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+ "is-undetectable-and-branch")
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
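
A decoding aid for the template arguments in these instruction classes: LTemplateInstruction<R, I, T> fixes the counts of result, input, and temp operands at compile time, so LCmpSymbolEq is <1 result, 2 inputs, 0 temps> and LIsUndetectableAndBranch, as a control instruction, carries 1 input and 1 temp with no result. A storage-only sketch of the convention (the real classes add the full operand protocol):

    #include <cstdio>

    struct LOperandSketch { int id; };

    // R results, I inputs, T temps, all sized statically.
    template <int R, int I, int T>
    struct LTemplateInstructionSketch {
      LOperandSketch* results_[R == 0 ? 1 : R];
      LOperandSketch* inputs_[I == 0 ? 1 : I];
      LOperandSketch* temps_[T == 0 ? 1 : T];
    };

    struct LCmpSymbolEqSketch : LTemplateInstructionSketch<1, 2, 0> {
      LCmpSymbolEqSketch(LOperandSketch* left, LOperandSketch* right) {
        inputs_[0] = left;
        inputs_[1] = right;
      }
    };

    int main() {
      LOperandSketch a{0}, b{1};
      LCmpSymbolEqSketch cmp(&a, &b);
      printf("%d %d\n", cmp.inputs_[0]->id, cmp.inputs_[1]->id);  // 0 1
      return 0;
    }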
class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasInstanceType(LOperand* value) {
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 4ee9201..d5eda7a 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -191,7 +191,7 @@
void MacroAssembler::Set(Register dst, const Immediate& x) {
if (x.is_zero()) {
- xor_(dst, Operand(dst)); // shorter than mov
+ xor_(dst, Operand(dst)); // Shorter than mov.
} else {
mov(dst, x);
}
@@ -203,6 +203,33 @@
}
+bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
+ static const int kMaxImmediateBits = 17;
+ if (x.rmode_ != RelocInfo::NONE) return false;
+ return !is_intn(x.x_, kMaxImmediateBits);
+}
+
+
+void MacroAssembler::SafeSet(Register dst, const Immediate& x) {
+ if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
+ Set(dst, Immediate(x.x_ ^ jit_cookie()));
+ xor_(dst, jit_cookie());
+ } else {
+ Set(dst, x);
+ }
+}
+
+
+void MacroAssembler::SafePush(const Immediate& x) {
+ if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
+ push(Immediate(x.x_ ^ jit_cookie()));
+ xor_(Operand(esp, 0), Immediate(jit_cookie()));
+ } else {
+ push(x);
+ }
+}
+
+
void MacroAssembler::CmpObjectType(Register heap_object,
InstanceType type,
Register map) {
@@ -513,7 +540,7 @@
Set(esi, Immediate(0)); // Tentatively set context pointer to NULL.
Label skip;
cmp(ebp, 0);
- j(equal, &skip, not_taken, Label::kNear);
+ j(equal, &skip, Label::kNear);
mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
bind(&skip);
@@ -614,7 +641,7 @@
// Check if both contexts are the same.
cmp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
- j(equal, &same_contexts, taken);
+ j(equal, &same_contexts);
// Compare security tokens, save holder_reg on the stack so we can use it
// as a temporary register.
@@ -644,7 +671,7 @@
mov(scratch, FieldOperand(scratch, token_offset));
cmp(scratch, FieldOperand(holder_reg, token_offset));
pop(holder_reg);
- j(not_equal, miss, not_taken);
+ j(not_equal, miss);
bind(&same_contexts);
}
@@ -732,9 +759,9 @@
mov(top_reg, result);
}
add(Operand(top_reg), Immediate(object_size));
- j(carry, gc_required, not_taken);
+ j(carry, gc_required);
cmp(top_reg, Operand::StaticVariable(new_space_allocation_limit));
- j(above, gc_required, not_taken);
+ j(above, gc_required);
// Update allocation top.
UpdateAllocationTopHelper(top_reg, scratch);
@@ -831,9 +858,9 @@
mov(result_end, object_size);
}
add(result_end, Operand(result));
- j(carry, gc_required, not_taken);
+ j(carry, gc_required);
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
- j(above, gc_required, not_taken);
+ j(above, gc_required);
// Tag result if requested.
if ((flags & TAG_OBJECT) != 0) {
@@ -1062,9 +1089,9 @@
Label* then_label) {
Label ok;
test(result, Operand(result));
- j(not_zero, &ok, taken);
+ j(not_zero, &ok);
test(op, Operand(op));
- j(sign, then_label, not_taken);
+ j(sign, then_label);
bind(&ok);
}
@@ -1076,10 +1103,10 @@
Label* then_label) {
Label ok;
test(result, Operand(result));
- j(not_zero, &ok, taken);
+ j(not_zero, &ok);
mov(scratch, Operand(op1));
or_(scratch, Operand(op2));
- j(sign, then_label, not_taken);
+ j(sign, then_label);
bind(&ok);
}
@@ -1090,17 +1117,17 @@
Label* miss) {
// Check that the receiver isn't a smi.
test(function, Immediate(kSmiTagMask));
- j(zero, miss, not_taken);
+ j(zero, miss);
// Check that the function really is a function.
CmpObjectType(function, JS_FUNCTION_TYPE, result);
- j(not_equal, miss, not_taken);
+ j(not_equal, miss);
// Make sure that the function has an instance prototype.
Label non_instance;
movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
- j(not_zero, &non_instance, not_taken);
+ j(not_zero, &non_instance);
// Get the prototype or initial map from the function.
mov(result,
@@ -1110,7 +1137,7 @@
// simply miss the cache instead. This will allow us to allocate a
// prototype object on-demand in the runtime system.
cmp(Operand(result), Immediate(isolate()->factory()->the_hole_value()));
- j(equal, miss, not_taken);
+ j(equal, miss);
// If the function does not have an initial map, we're done.
Label done;
@@ -1391,7 +1418,7 @@
// Check if the result handle holds 0.
test(eax, Operand(eax));
- j(zero, &empty_handle, not_taken);
+ j(zero, &empty_handle);
// It was non-zero. Dereference to get the result value.
mov(eax, Operand(eax, 0));
bind(&prologue);
@@ -1401,7 +1428,7 @@
sub(Operand::StaticVariable(level_address), Immediate(1));
Assert(above_equal, "Invalid HandleScope level");
cmp(edi, Operand::StaticVariable(limit_address));
- j(not_equal, &delete_allocated_handles, not_taken);
+ j(not_equal, &delete_allocated_handles);
bind(&leave_exit_frame);
// Check if the function scheduled an exception.
@@ -1409,7 +1436,7 @@
ExternalReference::scheduled_exception_address(isolate());
cmp(Operand::StaticVariable(scheduled_exception_address),
Immediate(isolate()->factory()->the_hole_value()));
- j(not_equal, &promote_scheduled_exception, not_taken);
+ j(not_equal, &promote_scheduled_exception);
LeaveApiExitFrame();
ret(stack_space * kPointerSize);
bind(&promote_scheduled_exception);
@@ -1849,7 +1876,7 @@
void MacroAssembler::Check(Condition cc, const char* msg) {
Label L;
- j(cc, &L, taken);
+ j(cc, &L);
Abort(msg);
// Will not return here.
bind(&L);
@@ -1898,59 +1925,6 @@
}
-void MacroAssembler::JumpIfNotNumber(Register reg,
- TypeInfo info,
- Label* on_not_number) {
- if (emit_debug_code()) AbortIfSmi(reg);
- if (!info.IsNumber()) {
- cmp(FieldOperand(reg, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- j(not_equal, on_not_number);
- }
-}
-
-
-void MacroAssembler::ConvertToInt32(Register dst,
- Register source,
- Register scratch,
- TypeInfo info,
- Label* on_not_int32) {
- if (emit_debug_code()) {
- AbortIfSmi(source);
- AbortIfNotNumber(source);
- }
- if (info.IsInteger32()) {
- cvttsd2si(dst, FieldOperand(source, HeapNumber::kValueOffset));
- } else {
- Label done;
- bool push_pop = (scratch.is(no_reg) && dst.is(source));
- ASSERT(!scratch.is(source));
- if (push_pop) {
- push(dst);
- scratch = dst;
- }
- if (scratch.is(no_reg)) scratch = dst;
- cvttsd2si(scratch, FieldOperand(source, HeapNumber::kValueOffset));
- cmp(scratch, 0x80000000u);
- if (push_pop) {
- j(not_equal, &done);
- pop(dst);
- jmp(on_not_int32);
- } else {
- j(equal, on_not_int32);
- }
-
- bind(&done);
- if (push_pop) {
- add(Operand(esp), Immediate(kPointerSize)); // Pop.
- }
- if (!scratch.is(dst)) {
- mov(dst, scratch);
- }
- }
-}
-
-
void MacroAssembler::LoadPowerOf2(XMMRegister dst,
Register scratch,
int power) {
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 271f692..f88d992 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -29,7 +29,6 @@
#define V8_IA32_MACRO_ASSEMBLER_IA32_H_
#include "assembler.h"
-#include "type-info.h"
namespace v8 {
namespace internal {
@@ -194,6 +193,11 @@
void Set(Register dst, const Immediate& x);
void Set(const Operand& dst, const Immediate& x);
+ // Support for constant splitting.
+ bool IsUnsafeImmediate(const Immediate& x);
+ void SafeSet(Register dst, const Immediate& x);
+ void SafePush(const Immediate& x);
+
// Compare object type for heap object.
// Incoming register is heap_object and outgoing register is map.
void CmpObjectType(Register heap_object, InstanceType type, Register map);
@@ -245,16 +249,6 @@
}
// Modifies the register even if it does not contain a Smi!
- void SmiUntag(Register reg, TypeInfo info, Label* non_smi) {
- ASSERT(kSmiTagSize == 1);
- sar(reg, kSmiTagSize);
- if (info.IsSmi()) {
- ASSERT(kSmiTag == 0);
- j(carry, non_smi);
- }
- }
-
- // Modifies the register even if it does not contain a Smi!
void SmiUntag(Register reg, Label* is_smi) {
ASSERT(kSmiTagSize == 1);
sar(reg, kSmiTagSize);
@@ -265,25 +259,14 @@
// Jump if the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label) {
test(value, Immediate(kSmiTagMask));
- j(zero, smi_label, not_taken);
+ j(zero, smi_label);
}
// Jump if the register contains a non-smi.
inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
test(value, Immediate(kSmiTagMask));
- j(not_zero, not_smi_label, not_taken);
+ j(not_zero, not_smi_label);
}
- // Assumes input is a heap object.
- void JumpIfNotNumber(Register reg, TypeInfo info, Label* on_not_number);
-
- // Assumes input is a heap number. Jumps on things out of range. Also jumps
- // on the min negative int32. Ignores frational parts.
- void ConvertToInt32(Register dst,
- Register src, // Can be the same as dst.
- Register scratch, // Can be no_reg or dst, but not src.
- TypeInfo info,
- Label* on_not_int32);
-
void LoadPowerOf2(XMMRegister dst, Register scratch, int power);
// Abort execution if argument is not a number. Used in debug code.
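
The kSmiTagMask tests in JumpIfSmi/JumpIfNotSmi above work because of the ia32 smi encoding: the 31-bit payload sits above a single tag bit that is 0 for smis, matching the kSmiTag == 0 and kSmiTagSize == 1 ASSERTs in this header. A sketch of the encoding (host-portable approximation; on ia32 intptr_t is 32 bits):

    #include <cstdint>
    #include <cstdio>

    constexpr intptr_t kSmiTag = 0;
    constexpr int kSmiTagSize = 1;
    constexpr intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;

    intptr_t SmiTag(int32_t value) {
      return static_cast<intptr_t>(value) << kSmiTagSize;  // tag bit -> 0
    }

    int32_t SmiUntag(intptr_t tagged) {
      return static_cast<int32_t>(tagged >> kSmiTagSize);  // like sar
    }

    bool IsSmi(intptr_t tagged) {
      // The test(value, Immediate(kSmiTagMask)) / j(zero, ...) pair.
      return (tagged & kSmiTagMask) == kSmiTag;
    }

    int main() {
      intptr_t s = SmiTag(-42);
      printf("%d %d\n", IsSmi(s), SmiUntag(s));  // 1 -42
      return 0;
    }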
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index 91f54da..8db2e9b 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -305,7 +305,7 @@
// The length of a capture should not be negative. This can only happen
// if the end of the capture is unrecorded, or at a point earlier than
// the start of the capture.
- BranchOrBacktrack(less, on_no_match, not_taken);
+ BranchOrBacktrack(less, on_no_match);
// If length is zero, either the capture is empty or it is completely
// uncaptured. In either case succeed immediately.
@@ -348,7 +348,7 @@
__ add(Operand(edi), Immediate(1));
// Compare to end of match, and loop if not done.
__ cmp(edi, Operand(ebx));
- __ j(below, &loop, taken);
+ __ j(below, &loop);
__ jmp(&success);
__ bind(&fail);
@@ -687,11 +687,11 @@
__ mov(ecx, esp);
__ sub(ecx, Operand::StaticVariable(stack_limit));
// Handle it if the stack pointer is already below the stack limit.
- __ j(below_equal, &stack_limit_hit, not_taken);
+ __ j(below_equal, &stack_limit_hit);
// Check if there is room for the variable number of registers above
// the stack limit.
__ cmp(ecx, num_registers_ * kPointerSize);
- __ j(above_equal, &stack_ok, taken);
+ __ j(above_equal, &stack_ok);
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
__ mov(eax, EXCEPTION);
@@ -1142,8 +1142,7 @@
void RegExpMacroAssemblerIA32::BranchOrBacktrack(Condition condition,
- Label* to,
- Hint hint) {
+ Label* to) {
if (condition < 0) { // No condition
if (to == NULL) {
Backtrack();
@@ -1153,10 +1152,10 @@
return;
}
if (to == NULL) {
- __ j(condition, &backtrack_label_, hint);
+ __ j(condition, &backtrack_label_);
return;
}
- __ j(condition, to, hint);
+ __ j(condition, to);
}
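
BranchOrBacktrack keeps its two fallbacks after losing the hint parameter: a negative condition means the jump is unconditional, and a NULL target means the shared backtrack label. A behavioral sketch with a mock assembler (condition values arbitrary here; only the decision tree matters):

    #include <cstdio>

    enum ConditionSketch { no_condition = -1, equal = 4, less = 12 };

    struct LabelSketch { const char* name; };

    struct RegExpMasmSketch {
      LabelSketch backtrack_label_{"backtrack"};
      void jmp(LabelSketch* to) { printf("jmp %s\n", to->name); }
      void j(ConditionSketch cc, LabelSketch* to) {
        printf("j(cc=%d) %s\n", cc, to->name);
      }
      // Same decision tree as the simplified function above.
      void BranchOrBacktrack(ConditionSketch condition, LabelSketch* to) {
        if (condition < 0) {  // no condition: unconditional control flow
          jmp(to != nullptr ? to : &backtrack_label_);
          return;
        }
        j(condition, to != nullptr ? to : &backtrack_label_);
      }
    };

    int main() {
      RegExpMasmSketch masm;
      LabelSketch on_no_match{"on_no_match"};
      masm.BranchOrBacktrack(less, &on_no_match);     // conditional branch
      masm.BranchOrBacktrack(no_condition, nullptr);  // backtracks
      return 0;
    }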
@@ -1209,7 +1208,7 @@
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(masm_->isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above, &no_preempt, taken);
+ __ j(above, &no_preempt);
SafeCall(&check_preempt_label_);
diff --git a/src/ia32/regexp-macro-assembler-ia32.h b/src/ia32/regexp-macro-assembler-ia32.h
index 70606da..21c86d0 100644
--- a/src/ia32/regexp-macro-assembler-ia32.h
+++ b/src/ia32/regexp-macro-assembler-ia32.h
@@ -168,7 +168,7 @@
// Equivalent to a conditional branch to the label, unless the label
// is NULL, in which case it is a conditional Backtrack.
- void BranchOrBacktrack(Condition condition, Label* to, Hint hint = no_hint);
+ void BranchOrBacktrack(Condition condition, Label* to);
// Call and return internally in the generated code in a way that
// is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index e5e6b47..924a489 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -57,7 +57,7 @@
// Check that the key in the entry matches the name.
__ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
// Check that the flags match what we're looking for.
__ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
@@ -76,7 +76,7 @@
// Check that the key in the entry matches the name.
__ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
// Get the code entry from the cache.
__ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
@@ -126,11 +126,11 @@
// Bail out if the receiver has a named interceptor or requires access checks.
__ test_b(FieldOperand(r0, Map::kBitFieldOffset),
kInterceptorOrAccessCheckNeededMask);
- __ j(not_zero, miss_label, not_taken);
+ __ j(not_zero, miss_label);
// Check that receiver is a JSObject.
__ CmpInstanceType(r0, FIRST_JS_OBJECT_TYPE);
- __ j(below, miss_label, not_taken);
+ __ j(below, miss_label);
// Load properties array.
Register properties = r0;
@@ -189,7 +189,7 @@
// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
+ __ j(zero, &miss);
// Get the map of the receiver and compute the hash.
__ mov(scratch, FieldOperand(name, String::kHashFieldOffset));
@@ -250,11 +250,11 @@
Label* miss_label) {
// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss_label, not_taken);
+ __ j(zero, miss_label);
// Check that the object is a JS array.
__ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, miss_label, not_taken);
+ __ j(not_equal, miss_label);
// Load length directly from the JS array.
__ mov(eax, FieldOperand(receiver, JSArray::kLengthOffset));
@@ -271,14 +271,14 @@
Label* non_string_object) {
// Check that the object isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, smi, not_taken);
+ __ j(zero, smi);
// Check that the object is a string.
__ mov(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
ASSERT(kNotStringTag != 0);
__ test(scratch, Immediate(kNotStringTag));
- __ j(not_zero, non_string_object, not_taken);
+ __ j(not_zero, non_string_object);
}
@@ -303,7 +303,7 @@
// Check if the object is a JSValue wrapper.
__ bind(&check_wrapper);
__ cmp(scratch1, JS_VALUE_TYPE);
- __ j(not_equal, miss, not_taken);
+ __ j(not_equal, miss);
// Check if the wrapped value is a string and load the length
// directly if it is.
@@ -508,7 +508,7 @@
// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
+ __ j(zero, miss);
CallOptimization optimization(lookup);
@@ -725,12 +725,12 @@
Label* miss_label) {
// Check that the object isn't a smi.
__ test(receiver_reg, Immediate(kSmiTagMask));
- __ j(zero, miss_label, not_taken);
+ __ j(zero, miss_label);
// Check that the map of the object hasn't changed.
__ cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
Immediate(Handle<Map>(object->map())));
- __ j(not_equal, miss_label, not_taken);
+ __ j(not_equal, miss_label);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -820,7 +820,7 @@
__ cmp(Operand::Cell(Handle<JSGlobalPropertyCell>(cell)),
Immediate(masm->isolate()->factory()->the_hole_value()));
}
- __ j(not_equal, miss, not_taken);
+ __ j(not_equal, miss);
return cell;
}
@@ -925,7 +925,7 @@
__ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
__ cmp(Operand(scratch1), Immediate(Handle<Map>(current->map())));
// Branch on the result of the map check.
- __ j(not_equal, miss, not_taken);
+ __ j(not_equal, miss);
// Check access rights to the global object. This has to happen
// after the map check so that we know that the object is
// actually a global object.
@@ -945,7 +945,7 @@
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
Immediate(Handle<Map>(current->map())));
// Branch on the result of the map check.
- __ j(not_equal, miss, not_taken);
+ __ j(not_equal, miss);
// Check access rights to the global object. This has to happen
// after the map check so that we know that the object is
// actually a global object.
@@ -972,7 +972,7 @@
// Check the holder map.
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
Immediate(Handle<Map>(holder->map())));
- __ j(not_equal, miss, not_taken);
+ __ j(not_equal, miss);
// Perform security check for access to the global object.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
@@ -1007,7 +1007,7 @@
Label* miss) {
// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
+ __ j(zero, miss);
// Check the prototype chain.
Register reg =
@@ -1032,7 +1032,7 @@
Label* miss) {
// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
+ __ j(zero, miss);
// Check that the maps haven't changed.
Register reg =
@@ -1099,7 +1099,7 @@
Label* miss) {
// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
+ __ j(zero, miss);
// Check that the maps haven't changed.
CheckPrototypes(object, receiver, holder,
@@ -1126,7 +1126,7 @@
// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
+ __ j(zero, miss);
// So far the most popular follow ups for interceptor loads are FIELD
// and CALLBACKS, so inline only them, other cases may be added
@@ -1255,7 +1255,7 @@
void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
if (kind_ == Code::KEYED_CALL_IC) {
__ cmp(Operand(ecx), Immediate(Handle<String>(name)));
- __ j(not_equal, miss, not_taken);
+ __ j(not_equal, miss);
}
}
@@ -1277,7 +1277,7 @@
// the receiver cannot be a smi.
if (object != holder) {
__ test(edx, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
+ __ j(zero, miss);
}
// Check that the maps haven't changed.
@@ -1304,17 +1304,17 @@
// function can all use this call IC. Before we load through the
// function, we have to verify that it still is a function.
__ test(edi, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
+ __ j(zero, miss);
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, miss, not_taken);
+ __ j(not_equal, miss);
// Check the shared function info. Make sure it hasn't changed.
__ cmp(FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset),
Immediate(Handle<SharedFunctionInfo>(function->shared())));
- __ j(not_equal, miss, not_taken);
+ __ j(not_equal, miss);
} else {
__ cmp(Operand(edi), Immediate(Handle<JSFunction>(function)));
- __ j(not_equal, miss, not_taken);
+ __ j(not_equal, miss);
}
}
@@ -1352,7 +1352,7 @@
// Check that the receiver isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
+ __ j(zero, &miss);
// Do the right check and compute the holder register.
Register reg = CheckPrototypes(object, edx, holder, ebx, eax, edi,
@@ -1362,9 +1362,9 @@
// Check that the function really is a function.
__ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
+ __ j(zero, &miss);
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
@@ -2129,7 +2129,7 @@
// Check that the receiver isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss_before_stack_reserved, not_taken);
+ __ j(zero, &miss_before_stack_reserved);
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->call_const(), 1);
@@ -2197,7 +2197,7 @@
// Check that the receiver isn't a smi.
if (check != NUMBER_CHECK) {
__ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
+ __ j(zero, &miss);
}
// Make sure that it's okay not to patch the on stack receiver
@@ -2229,7 +2229,7 @@
} else {
// Check that the object is a string or a symbol.
__ CmpObjectType(edx, FIRST_NONSTRING_TYPE, eax);
- __ j(above_equal, &miss, not_taken);
+ __ j(above_equal, &miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::STRING_FUNCTION_INDEX, eax, &miss);
@@ -2247,9 +2247,9 @@
Label fast;
// Check that the object is a smi or a heap number.
__ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &fast, taken);
+ __ j(zero, &fast);
__ CmpObjectType(edx, HEAP_NUMBER_TYPE, eax);
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
@@ -2269,9 +2269,9 @@
Label fast;
// Check that the object is a boolean.
__ cmp(edx, factory()->true_value());
- __ j(equal, &fast, taken);
+ __ j(equal, &fast);
__ cmp(edx, factory()->false_value());
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
@@ -2339,9 +2339,9 @@
// Check that the function really is a function.
__ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
+ __ j(zero, &miss);
__ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
@@ -2478,12 +2478,12 @@
// Check that the object isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
+ __ j(zero, &miss);
// Check that the map of the object hasn't changed.
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
Immediate(Handle<Map>(object->map())));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -2528,12 +2528,12 @@
// Check that the object isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
+ __ j(zero, &miss);
// Check that the map of the object hasn't changed.
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
Immediate(Handle<Map>(receiver->map())));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
// Perform global security token check if needed.
if (receiver->IsJSGlobalProxy()) {
@@ -2580,7 +2580,7 @@
// Check that the map of the global has not changed.
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
Immediate(Handle<Map>(object->map())));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
// Compute the cell operand to use.
@@ -2633,7 +2633,7 @@
// Check that the name has not changed.
__ cmp(Operand(ecx), Immediate(Handle<String>(name)));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
// Generate store field code. Trashes the name register.
GenerateStoreField(masm(),
@@ -2666,30 +2666,30 @@
// Check that the receiver isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
+ __ j(zero, &miss);
// Check that the map matches.
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
Immediate(Handle<Map>(receiver->map())));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
// Check that the key is a smi.
__ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &miss, not_taken);
+ __ j(not_zero, &miss);
// Get the elements array and make sure it is a fast element array, not 'cow'.
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
__ cmp(FieldOperand(edi, HeapObject::kMapOffset),
Immediate(factory()->fixed_array_map()));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
// Check that the key is within bounds.
if (receiver->IsJSArray()) {
__ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // Compare smis.
- __ j(above_equal, &miss, not_taken);
+ __ j(above_equal, &miss);
} else {
__ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); // Compare smis.
- __ j(above_equal, &miss, not_taken);
+ __ j(above_equal, &miss);
}
// Do the store and update the write barrier. Make sure to preserve
@@ -2723,7 +2723,7 @@
// Check that the receiver isn't a smi.
__ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
+ __ j(zero, &miss);
ASSERT(last->IsGlobalObject() || last->HasFastProperties());
@@ -2876,7 +2876,7 @@
// the receiver cannot be a smi.
if (object != holder) {
__ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
+ __ j(zero, &miss);
}
// Check that the maps haven't changed.
@@ -2893,7 +2893,7 @@
// Check for deleted property if property can actually be deleted.
if (!is_dont_delete) {
__ cmp(ebx, factory()->the_hole_value());
- __ j(equal, &miss, not_taken);
+ __ j(equal, &miss);
} else if (FLAG_debug_code) {
__ cmp(ebx, factory()->the_hole_value());
__ Check(not_equal, "DontDelete cells can't contain the hole");
@@ -2929,7 +2929,7 @@
// Check that the name has not changed.
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
GenerateLoadField(receiver, holder, edx, ebx, ecx, edi, index, name, &miss);
@@ -2959,7 +2959,7 @@
// Check that the name has not changed.
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
MaybeObject* result = GenerateLoadCallback(receiver, holder, edx, eax, ebx,
ecx, edi, callback, name, &miss);
@@ -2994,7 +2994,7 @@
// Check that the name has not changed.
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
GenerateLoadConstant(receiver, holder, edx, ebx, ecx, edi,
value, name, &miss);
@@ -3022,7 +3022,7 @@
// Check that the name has not changed.
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
LookupResult lookup;
LookupPostInterceptor(holder, name, &lookup);
@@ -3058,7 +3058,7 @@
// Check that the name has not changed.
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
GenerateLoadArrayLength(masm(), edx, ecx, &miss);
__ bind(&miss);
@@ -3083,7 +3083,7 @@
// Check that the name has not changed.
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss, true);
__ bind(&miss);
@@ -3108,7 +3108,7 @@
// Check that the name has not changed.
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
GenerateLoadFunctionPrototype(masm(), edx, ecx, ebx, &miss);
__ bind(&miss);
@@ -3130,16 +3130,16 @@
// Check that the receiver isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
+ __ j(zero, &miss);
// Check that the map matches.
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
Immediate(Handle<Map>(receiver->map())));
- __ j(not_equal, &miss, not_taken);
+ __ j(not_equal, &miss);
// Check that the key is a smi.
__ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &miss, not_taken);
+ __ j(not_zero, &miss);
// Get the elements array.
__ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
@@ -3147,13 +3147,13 @@
// Check that the key is within bounds.
__ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
- __ j(above_equal, &miss, not_taken);
+ __ j(above_equal, &miss);
// Load the result and make sure it's not the hole.
__ mov(ebx, Operand(ecx, eax, times_2,
FixedArray::kHeaderSize - kHeapObjectTag));
__ cmp(ebx, factory()->the_hole_value());
- __ j(equal, &miss, not_taken);
+ __ j(equal, &miss);
__ mov(eax, ebx);
__ ret(0);
@@ -3182,7 +3182,7 @@
__ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kDebugInfoOffset));
__ cmp(ebx, factory()->undefined_value());
- __ j(not_equal, &generic_stub_call, not_taken);
+ __ j(not_equal, &generic_stub_call);
#endif
// Load the initial map and verify that it is in fact a map.
@@ -3315,11 +3315,11 @@
// Check that the object isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
+ __ j(zero, &slow);
// Check that the key is a smi.
__ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &slow, not_taken);
+ __ j(not_zero, &slow);
// Check that the map matches.
__ CheckMap(edx, Handle<Map>(receiver->map()), &slow, false);
diff --git a/src/isolate.cc b/src/isolate.cc
index 4871702..9fac06a 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -40,6 +40,7 @@
#include "isolate.h"
#include "lithium-allocator.h"
#include "log.h"
+#include "messages.h"
#include "regexp-stack.h"
#include "runtime-profiler.h"
#include "scanner.h"
@@ -49,6 +50,7 @@
#include "spaces.h"
#include "stub-cache.h"
#include "version.h"
+#include "vm-state-inl.h"
namespace v8 {
@@ -61,6 +63,7 @@
return new_id;
}
+
int ThreadId::GetCurrentThreadId() {
int thread_id = Thread::GetThreadLocalInt(Isolate::thread_id_key_);
if (thread_id == 0) {
@@ -70,6 +73,53 @@
return thread_id;
}
+
+ThreadLocalTop::ThreadLocalTop() {
+ InitializeInternal();
+}
+
+
+void ThreadLocalTop::InitializeInternal() {
+ c_entry_fp_ = 0;
+ handler_ = 0;
+#ifdef USE_SIMULATOR
+ simulator_ = NULL;
+#endif
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ js_entry_sp_ = NULL;
+ external_callback_ = NULL;
+#endif
+#ifdef ENABLE_VMSTATE_TRACKING
+ current_vm_state_ = EXTERNAL;
+#endif
+ try_catch_handler_address_ = NULL;
+ context_ = NULL;
+ thread_id_ = ThreadId::Invalid();
+ external_caught_exception_ = false;
+ failed_access_check_callback_ = NULL;
+ save_context_ = NULL;
+ catcher_ = NULL;
+}
+
+
+void ThreadLocalTop::Initialize() {
+ InitializeInternal();
+#ifdef USE_SIMULATOR
+#ifdef V8_TARGET_ARCH_ARM
+ simulator_ = Simulator::current(isolate_);
+#elif V8_TARGET_ARCH_MIPS
+ simulator_ = Simulator::current(isolate_);
+#endif
+#endif
+ thread_id_ = ThreadId::Current();
+}
+
+
+v8::TryCatch* ThreadLocalTop::TryCatchHandler() {
+ return TRY_CATCH_FROM_ADDRESS(try_catch_handler_address());
+}
+
+
// Create a dummy thread that will wait forever on a semaphore. The only
// purpose for this thread is to have some stack area to save essential data
// into for use by a stacks only core dump (aka minidump).
@@ -372,6 +422,890 @@
}
+Address Isolate::get_address_from_id(Isolate::AddressId id) {
+ return isolate_addresses_[id];
+}
+
+
+char* Isolate::Iterate(ObjectVisitor* v, char* thread_storage) {
+ ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(thread_storage);
+ Iterate(v, thread);
+ return thread_storage + sizeof(ThreadLocalTop);
+}
+
+
+void Isolate::IterateThread(ThreadVisitor* v) {
+ v->VisitThread(this, thread_local_top());
+}
+
+
+void Isolate::IterateThread(ThreadVisitor* v, char* t) {
+ ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(t);
+ v->VisitThread(this, thread);
+}
+
+
+void Isolate::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
+ // Visit the roots from the top for a given thread.
+ Object* pending;
+ // The pending exception can sometimes be a failure. We can't show
+ // that to the GC, which only understands objects.
+ if (thread->pending_exception_->ToObject(&pending)) {
+ v->VisitPointer(&pending);
+ thread->pending_exception_ = pending; // In case GC updated it.
+ }
+ v->VisitPointer(&(thread->pending_message_obj_));
+ v->VisitPointer(BitCast<Object**>(&(thread->pending_message_script_)));
+ v->VisitPointer(BitCast<Object**>(&(thread->context_)));
+ Object* scheduled;
+ if (thread->scheduled_exception_->ToObject(&scheduled)) {
+ v->VisitPointer(&scheduled);
+ thread->scheduled_exception_ = scheduled;
+ }
+
+ for (v8::TryCatch* block = thread->TryCatchHandler();
+ block != NULL;
+ block = TRY_CATCH_FROM_ADDRESS(block->next_)) {
+ v->VisitPointer(BitCast<Object**>(&(block->exception_)));
+ v->VisitPointer(BitCast<Object**>(&(block->message_)));
+ }
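+
+  // Note: v8::TryCatch keeps exception_ and message_ as raw object
+  // pointers, so they must be visited here for them to remain valid
+  // across a moving GC.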
+
+ // Iterate over pointers on native execution stack.
+ for (StackFrameIterator it(this, thread); !it.done(); it.Advance()) {
+ it.frame()->Iterate(v);
+ }
+}
+
+
+void Isolate::Iterate(ObjectVisitor* v) {
+ ThreadLocalTop* current_t = thread_local_top();
+ Iterate(v, current_t);
+}
+
+
+void Isolate::RegisterTryCatchHandler(v8::TryCatch* that) {
+ // The ARM simulator has a separate JS stack. We therefore register
+ // the C++ try catch handler with the simulator and get back an
+ // address that can be used for comparisons with addresses into the
+ // JS stack. When running without the simulator, the address
+ // returned will be the address of the C++ try catch handler itself.
+ Address address = reinterpret_cast<Address>(
+ SimulatorStack::RegisterCTryCatch(reinterpret_cast<uintptr_t>(that)));
+ thread_local_top()->set_try_catch_handler_address(address);
+}
+
+
+void Isolate::UnregisterTryCatchHandler(v8::TryCatch* that) {
+ ASSERT(thread_local_top()->TryCatchHandler() == that);
+ thread_local_top()->set_try_catch_handler_address(
+ reinterpret_cast<Address>(that->next_));
+ thread_local_top()->catcher_ = NULL;
+ SimulatorStack::UnregisterCTryCatch();
+}
+
+
+Handle<String> Isolate::StackTraceString() {
+ if (stack_trace_nesting_level_ == 0) {
+ stack_trace_nesting_level_++;
+ HeapStringAllocator allocator;
+ StringStream::ClearMentionedObjectCache();
+ StringStream accumulator(&allocator);
+ incomplete_message_ = &accumulator;
+ PrintStack(&accumulator);
+ Handle<String> stack_trace = accumulator.ToString();
+ incomplete_message_ = NULL;
+ stack_trace_nesting_level_ = 0;
+ return stack_trace;
+ } else if (stack_trace_nesting_level_ == 1) {
+ stack_trace_nesting_level_++;
+ OS::PrintError(
+ "\n\nAttempt to print stack while printing stack (double fault)\n");
+ OS::PrintError(
+ "If you are lucky you may find a partial stack dump on stdout.\n\n");
+ incomplete_message_->OutputToStdOut();
+ return factory()->empty_symbol();
+ } else {
+ OS::Abort();
+ // Unreachable
+ return factory()->empty_symbol();
+ }
+}
+
+
+Handle<JSArray> Isolate::CaptureCurrentStackTrace(
+ int frame_limit, StackTrace::StackTraceOptions options) {
+ // Ensure no negative values.
+ int limit = Max(frame_limit, 0);
+ Handle<JSArray> stack_trace = factory()->NewJSArray(frame_limit);
+
+ Handle<String> column_key = factory()->LookupAsciiSymbol("column");
+ Handle<String> line_key = factory()->LookupAsciiSymbol("lineNumber");
+ Handle<String> script_key = factory()->LookupAsciiSymbol("scriptName");
+ Handle<String> name_or_source_url_key =
+ factory()->LookupAsciiSymbol("nameOrSourceURL");
+ Handle<String> script_name_or_source_url_key =
+ factory()->LookupAsciiSymbol("scriptNameOrSourceURL");
+ Handle<String> function_key = factory()->LookupAsciiSymbol("functionName");
+ Handle<String> eval_key = factory()->LookupAsciiSymbol("isEval");
+ Handle<String> constructor_key =
+ factory()->LookupAsciiSymbol("isConstructor");
+
+ StackTraceFrameIterator it(this);
+ int frames_seen = 0;
+ while (!it.done() && (frames_seen < limit)) {
+ JavaScriptFrame* frame = it.frame();
+ // Set initial size to the maximum inlining level + 1 for the outermost
+ // function.
+ List<FrameSummary> frames(Compiler::kMaxInliningLevels + 1);
+ frame->Summarize(&frames);
+ for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
+ // Create a JSObject to hold the information for the StackFrame.
+ Handle<JSObject> stackFrame = factory()->NewJSObject(object_function());
+
+ Handle<JSFunction> fun = frames[i].function();
+ Handle<Script> script(Script::cast(fun->shared()->script()));
+
+ if (options & StackTrace::kLineNumber) {
+ int script_line_offset = script->line_offset()->value();
+ int position = frames[i].code()->SourcePosition(frames[i].pc());
+ int line_number = GetScriptLineNumber(script, position);
+ // line_number is already shifted by the script_line_offset.
+ int relative_line_number = line_number - script_line_offset;
+ if (options & StackTrace::kColumnOffset && relative_line_number >= 0) {
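+          // line_ends records, for each line, the offset of that line's
+          // last character, so a line starts one past the previous line's
+          // end; the column is the distance from that start to the
+          // frame's source position.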
+ Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
+ int start = (relative_line_number == 0) ? 0 :
+ Smi::cast(line_ends->get(relative_line_number - 1))->value() + 1;
+ int column_offset = position - start;
+ if (relative_line_number == 0) {
+ // For the case where the code is on the same line as the script
+ // tag.
+ column_offset += script->column_offset()->value();
+ }
+ SetLocalPropertyNoThrow(stackFrame, column_key,
+ Handle<Smi>(Smi::FromInt(column_offset + 1)));
+ }
+ SetLocalPropertyNoThrow(stackFrame, line_key,
+ Handle<Smi>(Smi::FromInt(line_number + 1)));
+ }
+
+ if (options & StackTrace::kScriptName) {
+ Handle<Object> script_name(script->name(), this);
+ SetLocalPropertyNoThrow(stackFrame, script_key, script_name);
+ }
+
+ if (options & StackTrace::kScriptNameOrSourceURL) {
+ Handle<Object> script_name(script->name(), this);
+ Handle<JSValue> script_wrapper = GetScriptWrapper(script);
+ Handle<Object> property = GetProperty(script_wrapper,
+ name_or_source_url_key);
+ ASSERT(property->IsJSFunction());
+ Handle<JSFunction> method = Handle<JSFunction>::cast(property);
+ bool caught_exception;
+ Handle<Object> result = Execution::TryCall(method, script_wrapper, 0,
+ NULL, &caught_exception);
+ if (caught_exception) {
+ result = factory()->undefined_value();
+ }
+ SetLocalPropertyNoThrow(stackFrame, script_name_or_source_url_key,
+ result);
+ }
+
+ if (options & StackTrace::kFunctionName) {
+ Handle<Object> fun_name(fun->shared()->name(), this);
+ if (fun_name->ToBoolean()->IsFalse()) {
+ fun_name = Handle<Object>(fun->shared()->inferred_name(), this);
+ }
+ SetLocalPropertyNoThrow(stackFrame, function_key, fun_name);
+ }
+
+ if (options & StackTrace::kIsEval) {
+ int type = Smi::cast(script->compilation_type())->value();
+ Handle<Object> is_eval = (type == Script::COMPILATION_TYPE_EVAL) ?
+ factory()->true_value() : factory()->false_value();
+ SetLocalPropertyNoThrow(stackFrame, eval_key, is_eval);
+ }
+
+ if (options & StackTrace::kIsConstructor) {
+ Handle<Object> is_constructor = (frames[i].is_constructor()) ?
+ factory()->true_value() : factory()->false_value();
+ SetLocalPropertyNoThrow(stackFrame, constructor_key, is_constructor);
+ }
+
+ FixedArray::cast(stack_trace->elements())->set(frames_seen, *stackFrame);
+ frames_seen++;
+ }
+ it.Advance();
+ }
+
+ stack_trace->set_length(Smi::FromInt(frames_seen));
+ return stack_trace;
+}
+
+
+void Isolate::PrintStack() {
+ if (stack_trace_nesting_level_ == 0) {
+ stack_trace_nesting_level_++;
+
+ StringAllocator* allocator;
+ if (preallocated_message_space_ == NULL) {
+ allocator = new HeapStringAllocator();
+ } else {
+ allocator = preallocated_message_space_;
+ }
+
+ StringStream::ClearMentionedObjectCache();
+ StringStream accumulator(allocator);
+ incomplete_message_ = &accumulator;
+ PrintStack(&accumulator);
+ accumulator.OutputToStdOut();
+ accumulator.Log();
+ incomplete_message_ = NULL;
+ stack_trace_nesting_level_ = 0;
+ if (preallocated_message_space_ == NULL) {
+ // Remove the HeapStringAllocator created above.
+ delete allocator;
+ }
+ } else if (stack_trace_nesting_level_ == 1) {
+ stack_trace_nesting_level_++;
+ OS::PrintError(
+ "\n\nAttempt to print stack while printing stack (double fault)\n");
+ OS::PrintError(
+ "If you are lucky you may find a partial stack dump on stdout.\n\n");
+ incomplete_message_->OutputToStdOut();
+ }
+}
+
+
+static void PrintFrames(StringStream* accumulator,
+ StackFrame::PrintMode mode) {
+ StackFrameIterator it;
+ for (int i = 0; !it.done(); it.Advance()) {
+ it.frame()->Print(accumulator, mode, i++);
+ }
+}
+
+
+void Isolate::PrintStack(StringStream* accumulator) {
+ if (!IsInitialized()) {
+ accumulator->Add(
+ "\n==== Stack trace is not available ==========================\n\n");
+ accumulator->Add(
+ "\n==== Isolate for the thread is not initialized =============\n\n");
+ return;
+ }
+ // The MentionedObjectCache is not GC-proof at the moment.
+ AssertNoAllocation nogc;
+ ASSERT(StringStream::IsMentionedObjectCacheClear());
+
+ // Avoid printing anything if there are no frames.
+ if (c_entry_fp(thread_local_top()) == 0) return;
+
+ accumulator->Add(
+ "\n==== Stack trace ============================================\n\n");
+ PrintFrames(accumulator, StackFrame::OVERVIEW);
+
+ accumulator->Add(
+ "\n==== Details ================================================\n\n");
+ PrintFrames(accumulator, StackFrame::DETAILS);
+
+ accumulator->PrintMentionedObjectCache();
+ accumulator->Add("=====================\n\n");
+}
+
+
+void Isolate::SetFailedAccessCheckCallback(
+ v8::FailedAccessCheckCallback callback) {
+ thread_local_top()->failed_access_check_callback_ = callback;
+}
+
+
+void Isolate::ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type) {
+ if (!thread_local_top()->failed_access_check_callback_) return;
+
+ ASSERT(receiver->IsAccessCheckNeeded());
+ ASSERT(context());
+
+ // Get the data object from access check info.
+ JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
+ if (!constructor->shared()->IsApiFunction()) return;
+ Object* data_obj =
+ constructor->shared()->get_api_func_data()->access_check_info();
+ if (data_obj == heap_.undefined_value()) return;
+
+ HandleScope scope;
+ Handle<JSObject> receiver_handle(receiver);
+ Handle<Object> data(AccessCheckInfo::cast(data_obj)->data());
+ thread_local_top()->failed_access_check_callback_(
+ v8::Utils::ToLocal(receiver_handle),
+ type,
+ v8::Utils::ToLocal(data));
+}
+
+
+enum MayAccessDecision {
+ YES, NO, UNKNOWN
+};
+
+
+static MayAccessDecision MayAccessPreCheck(Isolate* isolate,
+ JSObject* receiver,
+ v8::AccessType type) {
+ // During bootstrapping, callback functions are not enabled yet.
+ if (isolate->bootstrapper()->IsActive()) return YES;
+
+ if (receiver->IsJSGlobalProxy()) {
+ Object* receiver_context = JSGlobalProxy::cast(receiver)->context();
+ if (!receiver_context->IsContext()) return NO;
+
+    // Get the global context of the current top context. Avoid using
+    // Isolate::global_context() because it uses a Handle.
+ Context* global_context = isolate->context()->global()->global_context();
+ if (receiver_context == global_context) return YES;
+
+ if (Context::cast(receiver_context)->security_token() ==
+ global_context->security_token())
+ return YES;
+ }
+
+ return UNKNOWN;
+}
+
+
+bool Isolate::MayNamedAccess(JSObject* receiver, Object* key,
+ v8::AccessType type) {
+ ASSERT(receiver->IsAccessCheckNeeded());
+
+ // The callers of this method are not expecting a GC.
+ AssertNoAllocation no_gc;
+
+  // Skip checks for hidden property access. Note that we do not
+  // require the existence of a context in this case.
+ if (key == heap_.hidden_symbol()) return true;
+
+ // Check for compatibility between the security tokens in the
+ // current lexical context and the accessed object.
+ ASSERT(context());
+
+ MayAccessDecision decision = MayAccessPreCheck(this, receiver, type);
+ if (decision != UNKNOWN) return decision == YES;
+
+ // Get named access check callback
+ JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
+ if (!constructor->shared()->IsApiFunction()) return false;
+
+ Object* data_obj =
+ constructor->shared()->get_api_func_data()->access_check_info();
+ if (data_obj == heap_.undefined_value()) return false;
+
+ Object* fun_obj = AccessCheckInfo::cast(data_obj)->named_callback();
+ v8::NamedSecurityCallback callback =
+ v8::ToCData<v8::NamedSecurityCallback>(fun_obj);
+
+ if (!callback) return false;
+
+ HandleScope scope(this);
+ Handle<JSObject> receiver_handle(receiver, this);
+ Handle<Object> key_handle(key, this);
+ Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
+ LOG(this, ApiNamedSecurityCheck(key));
+ bool result = false;
+ {
+ // Leaving JavaScript.
+ VMState state(this, EXTERNAL);
+ result = callback(v8::Utils::ToLocal(receiver_handle),
+ v8::Utils::ToLocal(key_handle),
+ type,
+ v8::Utils::ToLocal(data));
+ }
+ return result;
+}
+
+
+bool Isolate::MayIndexedAccess(JSObject* receiver,
+ uint32_t index,
+ v8::AccessType type) {
+ ASSERT(receiver->IsAccessCheckNeeded());
+ // Check for compatibility between the security tokens in the
+ // current lexical context and the accessed object.
+ ASSERT(context());
+
+ MayAccessDecision decision = MayAccessPreCheck(this, receiver, type);
+ if (decision != UNKNOWN) return decision == YES;
+
+ // Get indexed access check callback
+ JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
+ if (!constructor->shared()->IsApiFunction()) return false;
+
+ Object* data_obj =
+ constructor->shared()->get_api_func_data()->access_check_info();
+ if (data_obj == heap_.undefined_value()) return false;
+
+ Object* fun_obj = AccessCheckInfo::cast(data_obj)->indexed_callback();
+ v8::IndexedSecurityCallback callback =
+ v8::ToCData<v8::IndexedSecurityCallback>(fun_obj);
+
+ if (!callback) return false;
+
+ HandleScope scope(this);
+ Handle<JSObject> receiver_handle(receiver, this);
+ Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
+ LOG(this, ApiIndexedSecurityCheck(index));
+ bool result = false;
+ {
+ // Leaving JavaScript.
+ VMState state(this, EXTERNAL);
+ result = callback(v8::Utils::ToLocal(receiver_handle),
+ index,
+ type,
+ v8::Utils::ToLocal(data));
+ }
+ return result;
+}
+
+
+const char* const Isolate::kStackOverflowMessage =
+ "Uncaught RangeError: Maximum call stack size exceeded";
+
+
+Failure* Isolate::StackOverflow() {
+ HandleScope scope;
+ Handle<String> key = factory()->stack_overflow_symbol();
+ Handle<JSObject> boilerplate =
+ Handle<JSObject>::cast(GetProperty(js_builtins_object(), key));
+ Handle<Object> exception = Copy(boilerplate);
+ // TODO(1240995): To avoid having to call JavaScript code to compute
+  // the message for stack overflow exceptions, which is very likely to
+ // double fault with another stack overflow exception, we use a
+ // precomputed message.
+ DoThrow(*exception, NULL);
+ return Failure::Exception();
+}
+
+
+Failure* Isolate::TerminateExecution() {
+ DoThrow(heap_.termination_exception(), NULL);
+ return Failure::Exception();
+}
+
+
+Failure* Isolate::Throw(Object* exception, MessageLocation* location) {
+ DoThrow(exception, location);
+ return Failure::Exception();
+}
+
+
+Failure* Isolate::ReThrow(MaybeObject* exception, MessageLocation* location) {
+ bool can_be_caught_externally = false;
+ ShouldReportException(&can_be_caught_externally,
+ is_catchable_by_javascript(exception));
+ thread_local_top()->catcher_ = can_be_caught_externally ?
+ try_catch_handler() : NULL;
+
+ // Set the exception being re-thrown.
+ set_pending_exception(exception);
+ return Failure::Exception();
+}
+
+
+Failure* Isolate::ThrowIllegalOperation() {
+ return Throw(heap_.illegal_access_symbol());
+}
+
+
+void Isolate::ScheduleThrow(Object* exception) {
+  // When scheduling a throw, we first throw the exception to get the
+  // error reporting if it is uncaught, before rescheduling it.
+ Throw(exception);
+ thread_local_top()->scheduled_exception_ = pending_exception();
+ thread_local_top()->external_caught_exception_ = false;
+ clear_pending_exception();
+}
+
+
+Failure* Isolate::PromoteScheduledException() {
+ MaybeObject* thrown = scheduled_exception();
+ clear_scheduled_exception();
+ // Re-throw the exception to avoid getting repeated error reporting.
+ return ReThrow(thrown);
+}
+
+
+void Isolate::PrintCurrentStackTrace(FILE* out) {
+ StackTraceFrameIterator it(this);
+ while (!it.done()) {
+ HandleScope scope;
+ // Find code position if recorded in relocation info.
+ JavaScriptFrame* frame = it.frame();
+ int pos = frame->LookupCode()->SourcePosition(frame->pc());
+ Handle<Object> pos_obj(Smi::FromInt(pos));
+ // Fetch function and receiver.
+ Handle<JSFunction> fun(JSFunction::cast(frame->function()));
+ Handle<Object> recv(frame->receiver());
+ // Advance to the next JavaScript frame and determine if the
+ // current frame is the top-level frame.
+ it.Advance();
+ Handle<Object> is_top_level = it.done()
+ ? factory()->true_value()
+ : factory()->false_value();
+ // Generate and print stack trace line.
+ Handle<String> line =
+ Execution::GetStackTraceLine(recv, fun, pos_obj, is_top_level);
+ if (line->length() > 0) {
+ line->PrintOn(out);
+ fprintf(out, "\n");
+ }
+ }
+}
+
+
+void Isolate::ComputeLocation(MessageLocation* target) {
+ *target = MessageLocation(Handle<Script>(heap_.empty_script()), -1, -1);
+ StackTraceFrameIterator it(this);
+ if (!it.done()) {
+ JavaScriptFrame* frame = it.frame();
+ JSFunction* fun = JSFunction::cast(frame->function());
+ Object* script = fun->shared()->script();
+ if (script->IsScript() &&
+ !(Script::cast(script)->source()->IsUndefined())) {
+ int pos = frame->LookupCode()->SourcePosition(frame->pc());
+ // Compute the location from the function and the reloc info.
+ Handle<Script> casted_script(Script::cast(script));
+ *target = MessageLocation(casted_script, pos, pos + 1);
+ }
+ }
+}
+
+
+bool Isolate::ShouldReportException(bool* can_be_caught_externally,
+ bool catchable_by_javascript) {
+ // Find the top-most try-catch handler.
+ StackHandler* handler =
+ StackHandler::FromAddress(Isolate::handler(thread_local_top()));
+ while (handler != NULL && !handler->is_try_catch()) {
+ handler = handler->next();
+ }
+
+ // Get the address of the external handler so we can compare the address to
+ // determine which one is closer to the top of the stack.
+ Address external_handler_address =
+ thread_local_top()->try_catch_handler_address();
+
+ // The exception has been externally caught if and only if there is
+ // an external handler which is on top of the top-most try-catch
+ // handler.
+ *can_be_caught_externally = external_handler_address != NULL &&
+ (handler == NULL || handler->address() > external_handler_address ||
+ !catchable_by_javascript);
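+  // (Stack handlers live on the machine stack, which grows toward lower
+  // addresses on all of V8's targets, so a numerically greater handler
+  // address means a handler further from the top of the stack.)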
+
+ if (*can_be_caught_externally) {
+ // Only report the exception if the external handler is verbose.
+ return try_catch_handler()->is_verbose_;
+ } else {
+ // Report the exception if it isn't caught by JavaScript code.
+ return handler == NULL;
+ }
+}
+
+
+void Isolate::DoThrow(MaybeObject* exception, MessageLocation* location) {
+ ASSERT(!has_pending_exception());
+
+ HandleScope scope;
+ Object* exception_object = Smi::FromInt(0);
+ bool is_object = exception->ToObject(&exception_object);
+ Handle<Object> exception_handle(exception_object);
+
+ // Determine reporting and whether the exception is caught externally.
+ bool catchable_by_javascript = is_catchable_by_javascript(exception);
+ // Only real objects can be caught by JS.
+ ASSERT(!catchable_by_javascript || is_object);
+ bool can_be_caught_externally = false;
+ bool should_report_exception =
+ ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
+ bool report_exception = catchable_by_javascript && should_report_exception;
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // Notify debugger of exception.
+ if (catchable_by_javascript) {
+ debugger_->OnException(exception_handle, report_exception);
+ }
+#endif
+
+ // Generate the message.
+ Handle<Object> message_obj;
+ MessageLocation potential_computed_location;
+ bool try_catch_needs_message =
+ can_be_caught_externally &&
+ try_catch_handler()->capture_message_;
+ if (report_exception || try_catch_needs_message) {
+ if (location == NULL) {
+      // If no location was specified, we use a computed one instead.
+ ComputeLocation(&potential_computed_location);
+ location = &potential_computed_location;
+ }
+ if (!bootstrapper()->IsActive()) {
+ // It's not safe to try to make message objects or collect stack
+ // traces while the bootstrapper is active since the infrastructure
+ // may not have been properly initialized.
+ Handle<String> stack_trace;
+ if (FLAG_trace_exception) stack_trace = StackTraceString();
+ Handle<JSArray> stack_trace_object;
+ if (report_exception && capture_stack_trace_for_uncaught_exceptions_) {
+ stack_trace_object = CaptureCurrentStackTrace(
+ stack_trace_for_uncaught_exceptions_frame_limit_,
+ stack_trace_for_uncaught_exceptions_options_);
+ }
+ ASSERT(is_object); // Can't use the handle unless there's a real object.
+ message_obj = MessageHandler::MakeMessageObject("uncaught_exception",
+ location, HandleVector<Object>(&exception_handle, 1), stack_trace,
+ stack_trace_object);
+ }
+ }
+
+  // Save the message for reporting if the exception remains uncaught.
+ thread_local_top()->has_pending_message_ = report_exception;
+ if (!message_obj.is_null()) {
+ thread_local_top()->pending_message_obj_ = *message_obj;
+ if (location != NULL) {
+ thread_local_top()->pending_message_script_ = *location->script();
+ thread_local_top()->pending_message_start_pos_ = location->start_pos();
+ thread_local_top()->pending_message_end_pos_ = location->end_pos();
+ }
+ }
+
+  // Do not forget to clean catcher_ if the currently thrown exception cannot
+ // be caught. If necessary, ReThrow will update the catcher.
+ thread_local_top()->catcher_ = can_be_caught_externally ?
+ try_catch_handler() : NULL;
+
+ // NOTE: Notifying the debugger or generating the message
+ // may have caused new exceptions. For now, we just ignore
+ // that and set the pending exception to the original one.
+ if (is_object) {
+ set_pending_exception(*exception_handle);
+ } else {
+ // Failures are not on the heap so they neither need nor work with handles.
+ ASSERT(exception_handle->IsFailure());
+ set_pending_exception(exception);
+ }
+}
+
+
+bool Isolate::IsExternallyCaught() {
+ ASSERT(has_pending_exception());
+
+ if ((thread_local_top()->catcher_ == NULL) ||
+ (try_catch_handler() != thread_local_top()->catcher_)) {
+ // When throwing the exception, we found no v8::TryCatch
+ // which should care about this exception.
+ return false;
+ }
+
+ if (!is_catchable_by_javascript(pending_exception())) {
+ return true;
+ }
+
+ // Get the address of the external handler so we can compare the address to
+ // determine which one is closer to the top of the stack.
+ Address external_handler_address =
+ thread_local_top()->try_catch_handler_address();
+ ASSERT(external_handler_address != NULL);
+
+ // The exception has been externally caught if and only if there is
+ // an external handler which is on top of the top-most try-finally
+ // handler.
+ // There should be no try-catch blocks as they would prohibit us from
+ // finding external catcher in the first place (see catcher_ check above).
+ //
+  // Note that a finally clause would rethrow an exception unless it's
+  // aborted by jumps in control flow like return, break, etc., and we'll
+  // have another chance to set a proper v8::TryCatch.
+ StackHandler* handler =
+ StackHandler::FromAddress(Isolate::handler(thread_local_top()));
+ while (handler != NULL && handler->address() < external_handler_address) {
+ ASSERT(!handler->is_try_catch());
+ if (handler->is_try_finally()) return false;
+
+ handler = handler->next();
+ }
+
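+  // No try-finally handler was found between the stack top and the
+  // external handler, so the v8::TryCatch really will receive the
+  // exception.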
+ return true;
+}
+
+
+void Isolate::ReportPendingMessages() {
+ ASSERT(has_pending_exception());
+ PropagatePendingExceptionToExternalTryCatch();
+
+  // If the pending exception is OutOfMemoryException, set out_of_memory in
+ // the global context. Note: We have to mark the global context here
+ // since the GenerateThrowOutOfMemory stub cannot make a RuntimeCall to
+ // set it.
+ HandleScope scope;
+ if (thread_local_top_.pending_exception_ == Failure::OutOfMemoryException()) {
+ context()->mark_out_of_memory();
+ } else if (thread_local_top_.pending_exception_ ==
+ heap()->termination_exception()) {
+    // Do nothing: if needed, the exception has already been propagated to
+ // v8::TryCatch.
+ } else {
+ if (thread_local_top_.has_pending_message_) {
+ thread_local_top_.has_pending_message_ = false;
+ if (!thread_local_top_.pending_message_obj_->IsTheHole()) {
+ HandleScope scope;
+ Handle<Object> message_obj(thread_local_top_.pending_message_obj_);
+ if (thread_local_top_.pending_message_script_ != NULL) {
+ Handle<Script> script(thread_local_top_.pending_message_script_);
+ int start_pos = thread_local_top_.pending_message_start_pos_;
+ int end_pos = thread_local_top_.pending_message_end_pos_;
+ MessageLocation location(script, start_pos, end_pos);
+ MessageHandler::ReportMessage(this, &location, message_obj);
+ } else {
+ MessageHandler::ReportMessage(this, NULL, message_obj);
+ }
+ }
+ }
+ }
+ clear_pending_message();
+}
+
+
+void Isolate::TraceException(bool flag) {
+ FLAG_trace_exception = flag; // TODO(isolates): This is an unfortunate use.
+}
+
+
+bool Isolate::OptionalRescheduleException(bool is_bottom_call) {
+ ASSERT(has_pending_exception());
+ PropagatePendingExceptionToExternalTryCatch();
+
+  // Always reschedule out-of-memory exceptions.
+ if (!is_out_of_memory()) {
+ bool is_termination_exception =
+ pending_exception() == heap_.termination_exception();
+
+ // Do not reschedule the exception if this is the bottom call.
+ bool clear_exception = is_bottom_call;
+
+ if (is_termination_exception) {
+ if (is_bottom_call) {
+ thread_local_top()->external_caught_exception_ = false;
+ clear_pending_exception();
+ return false;
+ }
+ } else if (thread_local_top()->external_caught_exception_) {
+ // If the exception is externally caught, clear it if there are no
+ // JavaScript frames on the way to the C++ frame that has the
+ // external handler.
+ ASSERT(thread_local_top()->try_catch_handler_address() != NULL);
+ Address external_handler_address =
+ thread_local_top()->try_catch_handler_address();
+ JavaScriptFrameIterator it;
+ if (it.done() || (it.frame()->sp() > external_handler_address)) {
+ clear_exception = true;
+ }
+ }
+
+ // Clear the exception if needed.
+ if (clear_exception) {
+ thread_local_top()->external_caught_exception_ = false;
+ clear_pending_exception();
+ return false;
+ }
+ }
+
+ // Reschedule the exception.
+ thread_local_top()->scheduled_exception_ = pending_exception();
+ clear_pending_exception();
+ return true;
+}
+
+
+void Isolate::SetCaptureStackTraceForUncaughtExceptions(
+ bool capture,
+ int frame_limit,
+ StackTrace::StackTraceOptions options) {
+ capture_stack_trace_for_uncaught_exceptions_ = capture;
+ stack_trace_for_uncaught_exceptions_frame_limit_ = frame_limit;
+ stack_trace_for_uncaught_exceptions_options_ = options;
+}
+
+
+bool Isolate::is_out_of_memory() {
+ if (has_pending_exception()) {
+ MaybeObject* e = pending_exception();
+ if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
+ return true;
+ }
+ }
+ if (has_scheduled_exception()) {
+ MaybeObject* e = scheduled_exception();
+ if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+Handle<Context> Isolate::global_context() {
+ GlobalObject* global = thread_local_top()->context_->global();
+ return Handle<Context>(global->global_context());
+}
+
+
+Handle<Context> Isolate::GetCallingGlobalContext() {
+ JavaScriptFrameIterator it;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ if (debug_->InDebugger()) {
+ while (!it.done()) {
+ JavaScriptFrame* frame = it.frame();
+ Context* context = Context::cast(frame->context());
+ if (context->global_context() == *debug_->debug_context()) {
+ it.Advance();
+ } else {
+ break;
+ }
+ }
+ }
+#endif // ENABLE_DEBUGGER_SUPPORT
+ if (it.done()) return Handle<Context>::null();
+ JavaScriptFrame* frame = it.frame();
+ Context* context = Context::cast(frame->context());
+ return Handle<Context>(context->global_context());
+}
+
+
+char* Isolate::ArchiveThread(char* to) {
+ if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) {
+ RuntimeProfiler::IsolateExitedJS(this);
+ }
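+  // ThreadLocalTop is effectively a POD struct of raw pointers and
+  // scalars, so the per-thread state can be archived with a flat memcpy
+  // and restored the same way in RestoreThread.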
+ memcpy(to, reinterpret_cast<char*>(thread_local_top()),
+ sizeof(ThreadLocalTop));
+ InitializeThreadLocal();
+ return to + sizeof(ThreadLocalTop);
+}
+
+
+char* Isolate::RestoreThread(char* from) {
+ memcpy(reinterpret_cast<char*>(thread_local_top()), from,
+ sizeof(ThreadLocalTop));
+ // This might be just paranoia, but it seems to be needed in case a
+ // thread_local_top_ is restored on a separate OS thread.
+#ifdef USE_SIMULATOR
+#ifdef V8_TARGET_ARCH_ARM
+ thread_local_top()->simulator_ = Simulator::current(this);
+#elif V8_TARGET_ARCH_MIPS
+ thread_local_top()->simulator_ = Simulator::current(this);
+#endif
+#endif
+ if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) {
+ RuntimeProfiler::IsolateEnteredJS(this);
+ }
+ return from + sizeof(ThreadLocalTop);
+}
+
+
Isolate::ThreadDataTable::ThreadDataTable()
: list_(NULL) {
}
diff --git a/src/liveedit.cc b/src/liveedit.cc
index 3754fa2..4bfd3aa 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -527,12 +527,12 @@
// Wraps any object into an OpaqueReference that will hide the object
// from JavaScript.
-static Handle<JSValue> WrapInJSValue(Object* object) {
+static Handle<JSValue> WrapInJSValue(Handle<Object> object) {
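+  // Taking a Handle<Object> (rather than a raw Object*) keeps the object
+  // rooted across the allocating NewJSObject call below, so a GC triggered
+  // by that allocation cannot invalidate it.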
Handle<JSFunction> constructor =
Isolate::Current()->opaque_reference_function();
Handle<JSValue> result =
Handle<JSValue>::cast(FACTORY->NewJSObject(constructor));
- result->set_value(object);
+ result->set_value(*object);
return result;
}
@@ -599,17 +599,17 @@
}
void SetFunctionCode(Handle<Code> function_code,
Handle<Object> code_scope_info) {
- Handle<JSValue> code_wrapper = WrapInJSValue(*function_code);
+ Handle<JSValue> code_wrapper = WrapInJSValue(function_code);
this->SetField(kCodeOffset_, code_wrapper);
- Handle<JSValue> scope_wrapper = WrapInJSValue(*code_scope_info);
+ Handle<JSValue> scope_wrapper = WrapInJSValue(code_scope_info);
this->SetField(kCodeScopeInfoOffset_, scope_wrapper);
}
void SetOuterScopeInfo(Handle<Object> scope_info_array) {
this->SetField(kOuterScopeInfoOffset_, scope_info_array);
}
void SetSharedFunctionInfo(Handle<SharedFunctionInfo> info) {
- Handle<JSValue> info_holder = WrapInJSValue(*info);
+ Handle<JSValue> info_holder = WrapInJSValue(info);
this->SetField(kSharedFunctionInfoOffset_, info_holder);
}
int GetParentIndex() {
@@ -666,7 +666,7 @@
Handle<SharedFunctionInfo> info) {
HandleScope scope;
this->SetField(kFunctionNameOffset_, name);
- Handle<JSValue> info_holder = WrapInJSValue(*info);
+ Handle<JSValue> info_holder = WrapInJSValue(info);
this->SetField(kSharedInfoOffset_, info_holder);
this->SetSmiValueField(kStartPositionOffset_, start_position);
this->SetSmiValueField(kEndPositionOffset_, end_position);
diff --git a/src/macros.py b/src/macros.py
index 69f36c0..28d501f 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -127,7 +127,7 @@
macro TO_UINT32(arg) = (arg >>> 0);
macro TO_STRING_INLINE(arg) = (IS_STRING(%IS_VAR(arg)) ? arg : NonStringToString(arg));
macro TO_NUMBER_INLINE(arg) = (IS_NUMBER(%IS_VAR(arg)) ? arg : NonNumberToNumber(arg));
-
+macro TO_OBJECT_INLINE(arg) = (IS_SPEC_OBJECT(%IS_VAR(arg)) ? arg : ToObject(arg));
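+# TO_OBJECT_INLINE skips the ToObject call when the argument is already a
+# spec object; %IS_VAR enforces that the argument is a plain variable, so
+# repeating it in the expansion cannot re-evaluate side effects.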
# Macros implemented in Python.
python macro CHAR_CODE(str) = ord(str[1]);
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 68a5062..b56adb6 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -305,13 +305,11 @@
*GetNextCandidateField(candidate) = next_candidate;
}
- STATIC_ASSERT(kPointerSize <= Code::kHeaderSize - Code::kHeaderPaddingStart);
-
static SharedFunctionInfo** GetNextCandidateField(
SharedFunctionInfo* candidate) {
Code* code = candidate->unchecked_code();
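+    // The next-candidate link is threaded through a dedicated slot in the
+    // candidate's Code object, forming an intrusive singly-linked list of
+    // code-flushing candidates.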
return reinterpret_cast<SharedFunctionInfo**>(
- code->address() + Code::kHeaderPaddingStart);
+ code->address() + Code::kNextCodeFlushingCandidateOffset);
}
static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
diff --git a/src/messages.js b/src/messages.js
index 14cedec..7983350 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -214,7 +214,8 @@
invalid_preparser_data: ["Invalid preparser data for function ", "%0"],
strict_mode_with: ["Strict mode code may not include a with statement"],
strict_catch_variable: ["Catch variable may not be eval or arguments in strict mode"],
- too_many_parameters: ["Too many parameters in function definition"],
+ too_many_parameters: ["Too many parameters in function definition (only 32766 allowed)"],
+ too_many_variables: ["Too many variables declared (only 32767 allowed)"],
strict_param_name: ["Parameter name eval or arguments is not allowed in strict mode"],
strict_param_dupe: ["Strict mode function may not have duplicate parameter names"],
strict_var_name: ["Variable name may not be eval or arguments in strict mode"],
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 5cbeda8..d41b0f1 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -47,97 +47,1567 @@
void Builtins::Generate_Adaptor(MacroAssembler* masm,
CFunctionId id,
BuiltinExtraArguments extra_args) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments excluding receiver
+ // -- a1 : called function (only guaranteed when
+ // -- extra_args requires it)
+ // -- cp : context
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[4 * (argc - 1)] : first argument
+  //  -- sp[4 * argc]       : receiver
+ // -----------------------------------
+
+ // Insert extra arguments.
+ int num_extra_args = 0;
+ if (extra_args == NEEDS_CALLED_FUNCTION) {
+ num_extra_args = 1;
+ __ push(a1);
+ } else {
+ ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+ }
+
+ // JumpToExternalReference expects a0 to contain the number of arguments
+ // including the receiver and the extra arguments.
+ __ Addu(a0, a0, Operand(num_extra_args + 1));
+ __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
+}
+
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+ // Load the global context.
+
+ __ lw(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ lw(result,
+ FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
+ // Load the Array function from the global context.
+ __ lw(result,
+ MemOperand(result,
+ Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+// This constant has the same value as JSArray::kPreallocatedArrayElements; if
+// JSArray::kPreallocatedArrayElements is changed, the handling of the loop
+// unfolding below should be reconsidered.
+static const int kLoopUnfoldLimit = 4;
+
+
+// Allocate an empty JSArray. The allocated array is put into the result
+// register. An elements backing store is allocated with size initial_capacity
+// and filled with the hole values.
+static void AllocateEmptyJSArray(MacroAssembler* masm,
+ Register array_function,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ int initial_capacity,
+ Label* gc_required) {
+ ASSERT(initial_capacity > 0);
+ // Load the initial map from the array function.
+ __ lw(scratch1, FieldMemOperand(array_function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Allocate the JSArray object together with space for a fixed array with the
+ // requested elements.
+ int size = JSArray::kSize + FixedArray::SizeFor(initial_capacity);
+ __ AllocateInNewSpace(size,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+ // Allocated the JSArray. Now initialize the fields except for the elements
+ // array.
+ // result: JSObject
+ // scratch1: initial map
+ // scratch2: start of next object
+ __ sw(scratch1, FieldMemOperand(result, JSObject::kMapOffset));
+ __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
+ __ sw(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
+ // Field JSArray::kElementsOffset is initialized later.
+ __ mov(scratch3, zero_reg);
+ __ sw(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
+
+ // Calculate the location of the elements array and set elements array member
+ // of the JSArray.
+ // result: JSObject
+ // scratch2: start of next object
+ __ Addu(scratch1, result, Operand(JSArray::kSize));
+ __ sw(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
+
+ // Clear the heap tag on the elements array.
+ __ And(scratch1, scratch1, Operand(~kHeapObjectTagMask));
+
+ // Initialize the FixedArray and fill it with holes. FixedArray length is
+ // stored as a smi.
+ // result: JSObject
+ // scratch1: elements array (untagged)
+ // scratch2: start of next object
+ __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
+ ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+ __ sw(scratch3, MemOperand(scratch1));
+ __ Addu(scratch1, scratch1, kPointerSize);
+ __ li(scratch3, Operand(Smi::FromInt(initial_capacity)));
+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+ __ sw(scratch3, MemOperand(scratch1));
+ __ Addu(scratch1, scratch1, kPointerSize);
+
+ // Fill the FixedArray with the hole value.
+ ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+ ASSERT(initial_capacity <= kLoopUnfoldLimit);
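+ // The fill loop below is fully unrolled; e.g. with initial_capacity == 4 it
+ // emits four store/increment pairs instead of a counted loop.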
+ __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+ for (int i = 0; i < initial_capacity; i++) {
+ __ sw(scratch3, MemOperand(scratch1));
+ __ Addu(scratch1, scratch1, kPointerSize);
+ }
+}
+
+
+// Allocate a JSArray with the number of elements stored in a register. The
+// register array_function holds the built-in Array function and the register
+// array_size holds the size of the array as a smi. The allocated array is put
+// into the result register and beginning and end of the FixedArray elements
+// storage is put into registers elements_array_storage and elements_array_end
+// (see below for when that is not the case). If the parameter fill_with_hole
+// is true, the allocated elements backing store is filled with the hole value;
+// otherwise it is left uninitialized. When the backing store is filled, the
+// register elements_array_storage is scratched.
+static void AllocateJSArray(MacroAssembler* masm,
+ Register array_function, // Array function.
+ Register array_size, // As a smi.
+ Register result,
+ Register elements_array_storage,
+ Register elements_array_end,
+ Register scratch1,
+ Register scratch2,
+ bool fill_with_hole,
+ Label* gc_required) {
+ Label not_empty, allocated;
+
+ // Load the initial map from the array function.
+ __ lw(elements_array_storage,
+ FieldMemOperand(array_function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check whether an empty sized array is requested.
+ __ Branch(&not_empty, ne, array_size, Operand(zero_reg));
+
+ // If an empty array is requested, allocate a small elements array anyway. This
+ // keeps the code below free of special casing for the empty array.
+ int size = JSArray::kSize +
+ FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size,
+ result,
+ elements_array_end,
+ scratch1,
+ gc_required,
+ TAG_OBJECT);
+ __ Branch(&allocated);
+
+ // Allocate the JSArray object together with space for a FixedArray with the
+ // requested number of elements.
+ __ bind(&not_empty);
+ ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
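+ // A smi n is encoded as n << kSmiTagSize, so the sra below recovers the
+ // untagged element count.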
+ __ li(elements_array_end,
+ (JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize);
+ __ sra(scratch1, array_size, kSmiTagSize);
+ __ Addu(elements_array_end, elements_array_end, scratch1);
+ __ AllocateInNewSpace(
+ elements_array_end,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+
+ // Allocated the JSArray. Now initialize the fields except for the elements
+ // array.
+ // result: JSObject
+ // elements_array_storage: initial map
+ // array_size: size of array (smi)
+ __ bind(&allocated);
+ __ sw(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
+ __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
+ __ sw(elements_array_storage,
+ FieldMemOperand(result, JSArray::kPropertiesOffset));
+ // Field JSArray::kElementsOffset is initialized later.
+ __ sw(array_size, FieldMemOperand(result, JSArray::kLengthOffset));
+
+ // Calculate the location of the elements array and set elements array member
+ // of the JSArray.
+ // result: JSObject
+ // array_size: size of array (smi)
+ __ Addu(elements_array_storage, result, Operand(JSArray::kSize));
+ __ sw(elements_array_storage,
+ FieldMemOperand(result, JSArray::kElementsOffset));
+
+ // Clear the heap tag on the elements array.
+ __ And(elements_array_storage,
+ elements_array_storage,
+ Operand(~kHeapObjectTagMask));
+ // Initialize the fixed array and fill it with holes. FixedArray length is
+ // stored as a smi.
+ // result: JSObject
+ // elements_array_storage: elements array (untagged)
+ // array_size: size of array (smi)
+ __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+ ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+ __ sw(scratch1, MemOperand(elements_array_storage));
+ __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
+
+ // The length of the FixedArray is the number of pre-allocated elements if
+ // the actual JSArray has length 0, and the requested size for non-empty
+ // JSArrays. The length of a FixedArray is stored as a smi.
+ ASSERT(kSmiTag == 0);
+ __ li(at, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+ __ movz(array_size, at, array_size);
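+ // (movz writes 'at' into array_size only when array_size is zero, i.e.
+ // when an empty array was requested.)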
+
+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+ __ sw(array_size, MemOperand(elements_array_storage));
+ __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
+
+ // Calculate elements array and elements array end.
+ // result: JSObject
+ // elements_array_storage: elements array element storage
+ // array_size: smi-tagged size of elements array
+ ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ sll(elements_array_end, array_size, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(elements_array_end, elements_array_storage, elements_array_end);
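+ // (The shift above turns the smi-tagged length n << kSmiTagSize into
+ // n * kPointerSize, the byte size of n elements.)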
+
+ // Fill the allocated FixedArray with the hole value if requested.
+ // result: JSObject
+ // elements_array_storage: elements array element storage
+ // elements_array_end: start of next object
+ if (fill_with_hole) {
+ Label loop, entry;
+ __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex);
+ __ Branch(&entry);
+ __ bind(&loop);
+ __ sw(scratch1, MemOperand(elements_array_storage));
+ __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
+
+ __ bind(&entry);
+ __ Branch(&loop, lt, elements_array_storage, Operand(elements_array_end));
+ }
+}
+
+
+// Create a new array for the built-in Array function. This function allocates
+// the JSArray object and the FixedArray elements array and initializes these.
+// If the Array cannot be constructed in native code the runtime is called. This
+// function assumes the following state:
+// a0: argc
+// a1: constructor (built-in Array function)
+// ra: return address
+// sp[0]: last argument
+// This function is used for both construct and normal calls of Array. The only
+// difference between handling a construct call and a normal call is that for a
+// construct call the constructor function in a1 needs to be preserved for
+// entering the generic code. In both cases argc in a0 needs to be preserved.
+// Both registers are preserved by this code so no need to differentiate between
+// construct call and normal call.
+static void ArrayNativeCode(MacroAssembler* masm,
+ Label* call_generic_code) {
+ Counters* counters = masm->isolate()->counters();
+ Label argc_one_or_more, argc_two_or_more;
+
+ // Check for array construction with zero or one argument.
+ __ Branch(&argc_one_or_more, ne, a0, Operand(zero_reg));
+ // Handle construction of an empty array.
+ AllocateEmptyJSArray(masm,
+ a1,
+ a2,
+ a3,
+ t0,
+ t1,
+ JSArray::kPreallocatedArrayElements,
+ call_generic_code);
+ __ IncrementCounter(counters->array_function_native(), 1, a3, t0);
+ // Set up the return value, remove the receiver from the stack and return.
+ __ mov(v0, a2);
+ __ Addu(sp, sp, Operand(kPointerSize));
+ __ Ret();
+
+ // Check for one argument. Bail out if the argument is not a smi or if it
+ // is negative.
+ __ bind(&argc_one_or_more);
+ __ Branch(&argc_two_or_more, ne, a0, Operand(1));
+
+ ASSERT(kSmiTag == 0);
+ __ lw(a2, MemOperand(sp)); // Get the argument from the stack.
+ __ And(a3, a2, Operand(kIntptrSignBit | kSmiTagMask));
+ __ Branch(call_generic_code, ne, a3, Operand(zero_reg));
+
+ // Handle construction of an empty array of a certain size. Bail out if size
+ // is too large to actually allocate an elements array.
+ ASSERT(kSmiTag == 0);
+ __ Branch(call_generic_code, ge, a2,
+ Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
+
+ // a0: argc
+ // a1: constructor
+ // a2: array_size (smi)
+ // sp[0]: argument
+ AllocateJSArray(masm,
+ a1,
+ a2,
+ a3,
+ t0,
+ t1,
+ t2,
+ t3,
+ true,
+ call_generic_code);
+ __ IncrementCounter(counters->array_function_native(), 1, a2, t0);
+
+ // Set up the return value, remove the receiver and argument from the stack
+ // and return.
+ __ mov(v0, a3);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ // Handle construction of an array from a list of arguments.
+ __ bind(&argc_two_or_more);
+ __ sll(a2, a0, kSmiTagSize); // Convert argc to a smi.
+
+ // a0: argc
+ // a1: constructor
+ // a2: array_size (smi)
+ // sp[0]: last argument
+ AllocateJSArray(masm,
+ a1,
+ a2,
+ a3,
+ t0,
+ t1,
+ t2,
+ t3,
+ false,
+ call_generic_code);
+ __ IncrementCounter(counters->array_function_native(), 1, a2, t2);
+
+ // Fill arguments as array elements. Copy from the top of the stack (last
+ // element) to the array backing store filling it backwards. Note:
+ // elements_array_end points after the backing store.
+ // a0: argc
+ // a3: JSArray
+ // t0: elements_array storage start (untagged)
+ // t1: elements_array_end (untagged)
+ // sp[0]: last argument
+
+ Label loop, entry;
+ __ Branch(&entry);
+ __ bind(&loop);
+ __ pop(a2);
+ __ Addu(t1, t1, -kPointerSize);
+ __ sw(a2, MemOperand(t1));
+ __ bind(&entry);
+ __ Branch(&loop, lt, t0, Operand(t1));
+
+ // Remove the caller arguments and receiver from the stack, set up the
+ // return value and return.
+ // a0: argc
+ // a3: JSArray
+ // sp[0]: receiver
+ __ Addu(sp, sp, Operand(kPointerSize));
+ __ mov(v0, a3);
+ __ Ret();
}
void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- ra : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ Label generic_array_code;
+
+ // Get the Array function.
+ GenerateLoadArrayFunction(masm, a1);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin Array function should be a map.
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ And(t0, a2, Operand(kSmiTagMask));
+ __ Assert(ne, "Unexpected initial map for Array function (1)",
+ t0, Operand(zero_reg));
+ __ GetObjectType(a2, a3, t0);
+ __ Assert(eq, "Unexpected initial map for Array function (2)",
+ t0, Operand(MAP_TYPE));
+ }
+
+ // Run the native code for the Array function called as a normal function.
+ ArrayNativeCode(masm, &generic_array_code);
+
+ // Jump to the generic array code if the specialized code cannot handle
+ // the construction.
+ __ bind(&generic_array_code);
+
+ Handle<Code> array_code =
+ masm->isolate()->builtins()->ArrayCodeGeneric();
+ __ Jump(array_code, RelocInfo::CODE_TARGET);
}
void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- ra : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ Label generic_constructor;
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the builtin and internal
+ // Array functions which always have a map.
+ // Initial map for the builtin Array function should be a map.
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ And(t0, a2, Operand(kSmiTagMask));
+ __ Assert(ne, "Unexpected initial map for Array function (3)",
+ t0, Operand(zero_reg));
+ __ GetObjectType(a2, a3, t0);
+ __ Assert(eq, "Unexpected initial map for Array function (4)",
+ t0, Operand(MAP_TYPE));
+ }
+
+ // Run the native code for the Array function called as a constructor.
+ ArrayNativeCode(masm, &generic_constructor);
+
+ // Jump to the generic construct code in case the specialized code cannot
+ // handle the construction.
+ __ bind(&generic_constructor);
+
+ Handle<Code> generic_construct_stub =
+ masm->isolate()->builtins()->JSConstructStubGeneric();
+ __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
}
void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->string_ctor_calls(), 1, a2, a3);
+
+ Register function = a1;
+ if (FLAG_debug_code) {
+ __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, a2);
+ __ Assert(eq, "Unexpected String function", function, Operand(a2));
+ }
+
+ // Load the first argument into a0 and get rid of the rest.
+ Label no_arguments;
+ __ Branch(&no_arguments, eq, a0, Operand(zero_reg));
+ // First arg = sp[(argc - 1) * 4].
+ __ Subu(a0, a0, Operand(1));
+ __ sll(a0, a0, kPointerSizeLog2);
+ __ Addu(sp, a0, sp);
+ __ lw(a0, MemOperand(sp));
+ // sp now points to args[0], drop args[0] + receiver.
+ __ Drop(2);
+
+ Register argument = a2;
+ Label not_cached, argument_is_string;
+ NumberToStringStub::GenerateLookupNumberStringCache(
+ masm,
+ a0, // Input.
+ argument, // Result.
+ a3, // Scratch.
+ t0, // Scratch.
+ t1, // Scratch.
+ false, // Input is not known to be a smi.
+ &not_cached);
+ __ IncrementCounter(counters->string_ctor_cached_number(), 1, a3, t0);
+ __ bind(&argument_is_string);
+
+ // ----------- S t a t e -------------
+ // -- a2 : argument converted to string
+ // -- a1 : constructor function
+ // -- ra : return address
+ // -----------------------------------
+
+ Label gc_required;
+ __ AllocateInNewSpace(JSValue::kSize,
+ v0, // Result.
+ a3, // Scratch.
+ t0, // Scratch.
+ &gc_required,
+ TAG_OBJECT);
+
+ // Initialize the String object.
+ Register map = a3;
+ __ LoadGlobalFunctionInitialMap(function, map, t0);
+ if (FLAG_debug_code) {
+ __ lbu(t0, FieldMemOperand(map, Map::kInstanceSizeOffset));
+ __ Assert(eq, "Unexpected string wrapper instance size",
+ t0, Operand(JSValue::kSize >> kPointerSizeLog2));
+ __ lbu(t0, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
+ __ Assert(eq, "Unexpected unused properties of string wrapper",
+ t0, Operand(zero_reg));
+ }
+ __ sw(map, FieldMemOperand(v0, HeapObject::kMapOffset));
+
+ __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
+ __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+ __ sw(argument, FieldMemOperand(v0, JSValue::kValueOffset));
+
+ // Ensure the object is fully initialized.
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+
+ __ Ret();
+
+ // The argument was not found in the number to string cache. Check
+ // if it's a string already before calling the conversion builtin.
+ Label convert_argument;
+ __ bind(&not_cached);
+ __ JumpIfSmi(a0, &convert_argument);
+
+ // Is it a String?
+ __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+ ASSERT(kNotStringTag != 0);
+ __ And(t0, a3, Operand(kIsNotStringMask));
+ __ Branch(&convert_argument, ne, t0, Operand(zero_reg));
+ __ mov(argument, a0);
+ __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
+ __ Branch(&argument_is_string);
+
+ // Invoke the conversion builtin and put the result into a2.
+ __ bind(&convert_argument);
+ __ push(function); // Preserve the function.
+ __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
+ __ EnterInternalFrame();
+ __ push(v0);
+ __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ __ LeaveInternalFrame();
+ __ pop(function);
+ __ mov(argument, v0);
+ __ Branch(&argument_is_string);
+
+ // Load the empty string into a2, remove the receiver from the
+ // stack, and jump back to the case where the argument is a string.
+ __ bind(&no_arguments);
+ __ LoadRoot(argument, Heap::kEmptyStringRootIndex);
+ __ Drop(1);
+ __ Branch(&argument_is_string);
+
+ // At this point the argument is already a string. Call runtime to
+ // create a string wrapper.
+ __ bind(&gc_required);
+ __ IncrementCounter(counters->string_ctor_gc_required(), 1, a3, t0);
+ __ EnterInternalFrame();
+ __ push(argument);
+ __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ __ LeaveInternalFrame();
+ __ Ret();
}
void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- ra : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ Label non_function_call;
+ // Check that the function is not a smi.
+ __ And(t0, a1, Operand(kSmiTagMask));
+ __ Branch(&non_function_call, eq, t0, Operand(zero_reg));
+ // Check that the function is a JSFunction.
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&non_function_call, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+ // Jump to the function-specific construct stub.
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kConstructStubOffset));
+ __ Addu(t9, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(Operand(t9));
+
+ // a0: number of arguments
+ // a1: called object
+ __ bind(&non_function_call);
+ // CALL_NON_FUNCTION expects the non-function constructor as receiver
+ // (instead of the original receiver from the call site). The receiver is
+ // stack element argc.
+ // Set expected number of arguments to zero (not changing a0).
+ __ mov(a2, zero_reg);
+ __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+}
+
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+ bool is_api_function,
+ bool count_constructions) {
+ // Should never count constructions for api objects.
+ ASSERT(!is_api_function || !count_constructions);
+
+ Isolate* isolate = masm->isolate();
+
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- ra : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ // Enter a construct frame.
+ __ EnterConstructFrame();
+
+ // Preserve the two incoming parameters on the stack.
+ __ sll(a0, a0, kSmiTagSize); // Tag arguments count.
+ __ MultiPushReversed(a0.bit() | a1.bit());
+
+ // Use t7 to hold undefined, which is used in several places below.
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+
+ Label rt_call, allocated;
+ // Try to allocate the object without transitioning into C code. If any of the
+ // preconditions is not met, the code bails out to the runtime call.
+ if (FLAG_inline_new) {
+ Label undo_allocation;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(isolate);
+ __ li(a2, Operand(debug_step_in_fp));
+ __ lw(a2, MemOperand(a2));
+ __ Branch(&rt_call, ne, a2, Operand(zero_reg));
+#endif
+
+ // Load the initial map and verify that it is in fact a map.
+ // a1: constructor function
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ And(t0, a2, Operand(kSmiTagMask));
+ __ Branch(&rt_call, eq, t0, Operand(zero_reg));
+ __ GetObjectType(a2, a3, t4);
+ __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
+
+ // Check that the constructor is not constructing a JSFunction (see comments
+ // in Runtime_NewObject in runtime.cc), in which case the initial map's
+ // instance type would be JS_FUNCTION_TYPE.
+ // a1: constructor function
+ // a2: initial map
+ __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+ __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE));
+
+ if (count_constructions) {
+ Label allocate;
+ // Decrease generous allocation count.
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ MemOperand constructor_count =
+ FieldMemOperand(a3, SharedFunctionInfo::kConstructionCountOffset);
+ __ lbu(t0, constructor_count);
+ __ Subu(t0, t0, Operand(1));
+ __ sb(t0, constructor_count);
+ __ Branch(&allocate, ne, t0, Operand(zero_reg));
+
+ __ Push(a1, a2);
+
+ __ push(a1); // Constructor.
+ // The call will replace the stub, so the countdown is only done once.
+ __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+ __ pop(a2);
+ __ pop(a1);
+
+ __ bind(&allocate);
+ }
+
+ // Now allocate the JSObject on the heap.
+ // a1: constructor function
+ // a2: initial map
+ __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+ __ AllocateInNewSpace(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
+
+ // Allocated the JSObject, now initialize the fields. Map is set to initial
+ // map and properties and elements are set to empty fixed array.
+ // a1: constructor function
+ // a2: initial map
+ // a3: object size
+ // t4: JSObject (not tagged)
+ __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
+ __ mov(t5, t4);
+ __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
+ __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
+ __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
+ __ Addu(t5, t5, Operand(3 * kPointerSize));
+ ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+ ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+ ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+
+ // Fill all the in-object properties with appropriate filler.
+ // a1: constructor function
+ // a2: initial map
+ // a3: object size (in words)
+ // t4: JSObject (not tagged)
+ // t5: First in-object property of JSObject (not tagged)
+ __ sll(t0, a3, kPointerSizeLog2);
+ __ addu(t6, t4, t0); // End of object.
+ ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+ { Label loop, entry;
+ if (count_constructions) {
+ // To allow for truncation.
+ __ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
+ } else {
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+ }
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ sw(t7, MemOperand(t5, 0));
+ __ addiu(t5, t5, kPointerSize);
+ __ bind(&entry);
+ __ Branch(&loop, Uless, t5, Operand(t6));
+ }
+
+ // Add the object tag to make the JSObject real, so that we can continue and
+ // jump into the continuation code at any time from now on. Any failures
+ // need to undo the allocation, so that the heap is in a consistent state
+ // and verifiable.
+ __ Addu(t4, t4, Operand(kHeapObjectTag));
+
+ // Check if a non-empty properties array is needed. Continue with the
+ // allocated object if not; fall through to the runtime call if it is.
+ // a1: constructor function
+ // t4: JSObject
+ // t5: start of next object (not tagged)
+ __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+ // The instance sizes field contains both the number of pre-allocated
+ // property fields and the number of in-object properties.
+ __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
+ __ And(t6,
+ a0,
+ Operand(0x000000FF << Map::kPreAllocatedPropertyFieldsByte * 8));
+ __ srl(t0, t6, Map::kPreAllocatedPropertyFieldsByte * 8);
+ __ Addu(a3, a3, Operand(t0));
+ __ And(t6, a0, Operand(0x000000FF << Map::kInObjectPropertiesByte * 8));
+ __ srl(t0, t6, Map::kInObjectPropertiesByte * 8);
+ __ subu(a3, a3, t0);
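+ // (Each byte of the instance sizes word holds one count: mask it with
+ // 0xFF << (byte * 8), then shift right by the same amount to extract it.)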
+
+ // Done if no extra properties are to be allocated.
+ __ Branch(&allocated, eq, a3, Operand(zero_reg));
+ __ Assert(greater_equal, "Property allocation count failed.",
+ a3, Operand(zero_reg));
+
+ // Scale the number of elements by the pointer size and add the FixedArray
+ // header size to the start-of-next-object calculation from above.
+ // a1: constructor
+ // a3: number of elements in properties array
+ // t4: JSObject
+ // t5: start of next object
+ __ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
+ __ AllocateInNewSpace(
+ a0,
+ t5,
+ t6,
+ a2,
+ &undo_allocation,
+ static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
+
+ // Initialize the FixedArray.
+ // a1: constructor
+ // a3: number of elements in properties array (un-tagged)
+ // t4: JSObject
+ // t5: start of next object
+ __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex);
+ __ mov(a2, t5);
+ __ sw(t6, MemOperand(a2, JSObject::kMapOffset));
+ __ sll(a0, a3, kSmiTagSize);
+ __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset));
+ __ Addu(a2, a2, Operand(2 * kPointerSize));
+
+ ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+
+ // Initialize the fields to undefined.
+ // a1: constructor
+ // a2: First element of FixedArray (not tagged)
+ // a3: number of elements in properties array
+ // t4: JSObject
+ // t5: FixedArray (not tagged)
+ __ sll(t3, a3, kPointerSizeLog2);
+ __ addu(t6, a2, t3); // End of object.
+ ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+ { Label loop, entry;
+ if (count_constructions) {
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+ } else if (FLAG_debug_code) {
+ __ LoadRoot(t8, Heap::kUndefinedValueRootIndex);
+ __ Assert(eq, "Undefined value not loaded.", t7, Operand(t8));
+ }
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ sw(t7, MemOperand(a2));
+ __ addiu(a2, a2, kPointerSize);
+ __ bind(&entry);
+ __ Branch(&loop, less, a2, Operand(t6));
+ }
+
+ // Store the initialized FixedArray into the properties field of
+ // the JSObject.
+ // a1: constructor function
+ // t4: JSObject
+ // t5: FixedArray (not tagged)
+ __ Addu(t5, t5, Operand(kHeapObjectTag)); // Add the heap tag.
+ __ sw(t5, FieldMemOperand(t4, JSObject::kPropertiesOffset));
+
+ // Continue with JSObject being successfully allocated.
+ // a1: constructor function
+ // t4: JSObject
+ __ jmp(&allocated);
+
+ // Undo the setting of the new top so that the heap is verifiable. For
+ // example, the map's unused properties potentially do not match the
+ // allocated object's unused properties.
+ // t4: JSObject (previous new top)
+ __ bind(&undo_allocation);
+ __ UndoAllocationInNewSpace(t4, t5);
+ }
+
+ __ bind(&rt_call);
+ // Allocate the new receiver object using the runtime call.
+ // a1: constructor function
+ __ push(a1); // Argument for Runtime_NewObject.
+ __ CallRuntime(Runtime::kNewObject, 1);
+ __ mov(t4, v0);
+
+ // Receiver for constructor call allocated.
+ // t4: JSObject
+ __ bind(&allocated);
+ __ push(t4);
+
+ // Push the function (reloaded from the stack) and the allocated receiver.
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ lw(a1, MemOperand(sp, kPointerSize));
+ __ MultiPushReversed(a1.bit() | t4.bit());
+
+ // Reload the number of arguments from the stack.
+ // a1: constructor function
+ // sp[0]: receiver
+ // sp[1]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ __ lw(a3, MemOperand(sp, 4 * kPointerSize));
+
+ // Set up the pointer to the last argument.
+ __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // Set up the number of arguments for the function call below.
+ __ srl(a0, a3, kSmiTagSize);
+
+ // Copy arguments and receiver to the expression stack.
+ // a0: number of arguments
+ // a1: constructor function
+ // a2: address of last argument (caller sp)
+ // a3: number of arguments (smi-tagged)
+ // sp[0]: receiver
+ // sp[1]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ Label loop, entry;
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ lw(t1, MemOperand(t0));
+ __ push(t1);
+ __ bind(&entry);
+ __ Addu(a3, a3, Operand(-2));
+ __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
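+ // (a3 is smi-tagged, so adding -2 decrements the argument index by one.)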
+
+ // Call the function.
+ // a0: number of arguments
+ // a1: constructor function
+ if (is_api_function) {
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ Handle<Code> code =
+ masm->isolate()->builtins()->HandleApiCallConstruct();
+ ParameterCount expected(0);
+ __ InvokeCode(code, expected, expected,
+ RelocInfo::CODE_TARGET, CALL_FUNCTION);
+ } else {
+ ParameterCount actual(a0);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION);
+ }
+
+ // Pop the function from the stack.
+ // v0: result
+ // sp[0]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ __ Pop();
+
+ // Restore context from the frame.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // v0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ And(t0, v0, Operand(kSmiTagMask));
+ __ Branch(&use_receiver, eq, t0, Operand(zero_reg));
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ GetObjectType(v0, a3, a3);
+ __ Branch(&exit, greater_equal, a3, Operand(FIRST_JS_OBJECT_TYPE));
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ lw(v0, MemOperand(sp));
+
+ // Remove receiver from the stack, remove caller arguments, and
+ // return.
+ __ bind(&exit);
+ // v0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ lw(a1, MemOperand(sp, 2 * kPointerSize));
+ __ LeaveConstructFrame();
+ __ sll(t0, a1, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(sp, sp, t0);
+ __ Addu(sp, sp, kPointerSize);
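+ // (a1 holds the smi-tagged argument count; the shift converts it to bytes,
+ // and the extra kPointerSize drops the receiver.)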
+ __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
+ __ Ret();
}
void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Generate_JSConstructStubHelper(masm, false, true);
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Generate_JSConstructStubHelper(masm, false, false);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Generate_JSConstructStubHelper(masm, true, false);
+}
+
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+ bool is_construct) {
+ // Called from JSEntryStub::GenerateBody
+
+ // ----------- S t a t e -------------
+ // -- a0: code entry
+ // -- a1: function
+ // -- a2: receiver_pointer
+ // -- a3: argc
+ // -- s0: argv
+ // -----------------------------------
+
+ // Clear the context before we push it when entering the JS frame.
+ __ mov(cp, zero_reg);
+
+ // Enter an internal frame.
+ __ EnterInternalFrame();
+
+ // Set up the context from the function argument.
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ // Set up the roots register.
+ ExternalReference roots_address =
+ ExternalReference::roots_address(masm->isolate());
+ __ li(s6, Operand(roots_address));
+
+ // Push the function and the receiver onto the stack.
+ __ Push(a1, a2);
+
+ // Copy arguments to the stack in a loop.
+ // a3: argc
+ // s0: argv, i.e. points to the first arg
+ Label loop, entry;
+ __ sll(t0, a3, kPointerSizeLog2);
+ __ addu(t2, s0, t0);
+ __ b(&entry);
+ __ nop(); // Branch delay slot nop.
+ // t2 points past last arg.
+ __ bind(&loop);
+ __ lw(t0, MemOperand(s0)); // Read next parameter.
+ __ addiu(s0, s0, kPointerSize);
+ __ lw(t0, MemOperand(t0)); // Dereference handle.
+ __ push(t0); // Push parameter.
+ __ bind(&entry);
+ __ Branch(&loop, ne, s0, Operand(t2));
+
+ // Initialize all JavaScript callee-saved registers, since they will be seen
+ // by the garbage collector as part of handlers.
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ mov(s1, t0);
+ __ mov(s2, t0);
+ __ mov(s3, t0);
+ __ mov(s4, t0);
+ __ mov(s5, t0);
+ // s6 holds the root address. Do not clobber.
+ // s7 is cp. Do not init.
+
+ // Invoke the code and pass argc as a0.
+ __ mov(a0, a3);
+ if (is_construct) {
+ __ Call(masm->isolate()->builtins()->JSConstructCall(),
+ RelocInfo::CODE_TARGET);
+ } else {
+ ParameterCount actual(a0);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION);
+ }
+
+ __ LeaveInternalFrame();
+
+ __ Jump(ra);
}
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Generate_JSEntryTrampolineHelper(masm, false);
}
void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Generate_JSEntryTrampolineHelper(masm, true);
}
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // Enter an internal frame.
+ __ EnterInternalFrame();
+
+ // Preserve the function.
+ __ push(a1);
+
+ // Push the function on the stack as the argument to the runtime function.
+ __ push(a1);
+ // Call the runtime function.
+ __ CallRuntime(Runtime::kLazyCompile, 1);
+ // Calculate the entry point.
+ __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
+ // Restore saved function.
+ __ pop(a1);
+
+ // Tear down temporary frame.
+ __ LeaveInternalFrame();
+
+ // Do a tail-call of the compiled function.
+ __ Jump(t9);
}
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // Enter an internal frame.
+ __ EnterInternalFrame();
+
+ // Preserve the function.
+ __ push(a1);
+
+ // Push the function on the stack as the argument to the runtime function.
+ __ push(a1);
+ __ CallRuntime(Runtime::kLazyRecompile, 1);
+ // Calculate the entry point.
+ __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Restore saved function.
+ __ pop(a1);
+
+ // Tear down temporary frame.
+ __ LeaveInternalFrame();
+
+ // Do a tail-call of the compiled function.
+ __ Jump(t9);
}
+// These functions are called from C++ but cannot be used in live code.
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ __ Abort("Call to unimplemented function in builtins-mips.cc");
}
void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ __ Abort("Call to unimplemented function in builtins-mips.cc");
}
void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ __ Abort("Call to unimplemented function in builtins-mips.cc");
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ __ Abort("Call to unimplemented function in builtins-mips.cc");
}
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // 1. Make sure we have at least one argument.
+ // a0: actual number of arguments
+ { Label done;
+ __ Branch(&done, ne, a0, Operand(zero_reg));
+ __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
+ __ push(t2);
+ __ Addu(a0, a0, Operand(1));
+ __ bind(&done);
+ }
+
+ // 2. Get the function to call (passed as receiver) from the stack, check
+ // if it is a function.
+ // a0: actual number of arguments
+ Label non_function;
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(at, sp, at);
+ __ lw(a1, MemOperand(at));
+ __ And(at, a1, Operand(kSmiTagMask));
+ __ Branch(&non_function, eq, at, Operand(zero_reg));
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+ // 3a. Patch the first argument if necessary when calling a function.
+ // a0: actual number of arguments
+ // a1: function
+ Label shift_arguments;
+ { Label convert_to_object, use_global_receiver, patch_receiver;
+ // Change context eagerly in case we need the global receiver.
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ // Do not transform the receiver for strict mode functions.
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ kSmiTagSize)));
+ __ Branch(&shift_arguments, ne, t0, Operand(zero_reg));
+
+ // Do not transform the receiver for native (shared already in a2).
+ __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kScriptOffset));
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ __ Branch(&shift_arguments, eq, a2, Operand(a3));
+ __ lw(a2, FieldMemOperand(a2, Script::kTypeOffset));
+ __ sra(a2, a2, kSmiTagSize);
+ __ Branch(&shift_arguments, eq, a2, Operand(Script::TYPE_NATIVE));
+
+ // Compute the receiver in non-strict mode.
+ // Load first argument in a2. a2 = -kPointerSize(sp + n_args << 2).
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(a2, sp, at);
+ __ lw(a2, MemOperand(a2, -kPointerSize));
+ // a0: actual number of arguments
+ // a1: function
+ // a2: first argument
+ __ JumpIfSmi(a2, &convert_to_object, t2);
+
+ // Heap::kUndefinedValueRootIndex is already in a3.
+ __ Branch(&use_global_receiver, eq, a2, Operand(a3));
+ __ LoadRoot(a3, Heap::kNullValueRootIndex);
+ __ Branch(&use_global_receiver, eq, a2, Operand(a3));
+
+ __ GetObjectType(a2, a3, a3);
+ __ Branch(&convert_to_object, lt, a3, Operand(FIRST_JS_OBJECT_TYPE));
+ __ Branch(&shift_arguments, le, a3, Operand(LAST_JS_OBJECT_TYPE));
+
+ __ bind(&convert_to_object);
+ __ EnterInternalFrame(); // In order to preserve argument count.
+ __ sll(a0, a0, kSmiTagSize); // Smi tagged.
+ __ push(a0);
+
+ __ push(a2);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(a2, v0);
+
+ __ pop(a0);
+ __ sra(a0, a0, kSmiTagSize); // Un-tag.
+ __ LeaveInternalFrame();
+ // Restore the function to a1.
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(at, sp, at);
+ __ lw(a1, MemOperand(at));
+ __ Branch(&patch_receiver);
+
+ // Use the global receiver object from the called function as the
+ // receiver.
+ __ bind(&use_global_receiver);
+ const int kGlobalIndex =
+ Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ __ lw(a2, FieldMemOperand(cp, kGlobalIndex));
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
+ __ lw(a2, FieldMemOperand(a2, kGlobalIndex));
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
+
+ __ bind(&patch_receiver);
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(a3, sp, at);
+ __ sw(a2, MemOperand(a3, -kPointerSize));
+
+ __ Branch(&shift_arguments);
+ }
+
+ // 3b. Patch the first argument when calling a non-function. The
+ // CALL_NON_FUNCTION builtin expects the non-function callee as
+ // receiver, so overwrite the first argument which will ultimately
+ // become the receiver.
+ // a0: actual number of arguments
+ // a1: function
+ __ bind(&non_function);
+ // Restore the function in case it has been modified.
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(a2, sp, at);
+ __ sw(a1, MemOperand(a2, -kPointerSize));
+ // Clear a1 to indicate a non-function being called.
+ __ mov(a1, zero_reg);
+
+ // 4. Shift arguments and return address one slot down on the stack
+ // (overwriting the original receiver). Adjust argument count to make
+ // the original first argument the new receiver.
+ // a0: actual number of arguments
+ // a1: function
+ __ bind(&shift_arguments);
+ { Label loop;
+ // Calculate the copy start address (destination). Copy end address is sp.
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(a2, sp, at);
+
+ __ bind(&loop);
+ __ lw(at, MemOperand(a2, -kPointerSize));
+ __ sw(at, MemOperand(a2));
+ __ Subu(a2, a2, Operand(kPointerSize));
+ __ Branch(&loop, ne, a2, Operand(sp));
+ // Adjust the actual number of arguments and remove the top element
+ // (which is a copy of the last argument).
+ __ Subu(a0, a0, Operand(1));
+ __ Pop();
+ }
+
+ // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
+ // a0: actual number of arguments
+ // a1: function
+ { Label function;
+ __ Branch(&function, ne, a1, Operand(zero_reg));
+ __ mov(a2, zero_reg); // Expected arguments count is 0 for CALL_NON_FUNCTION.
+ __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ __ bind(&function);
+ }
+
+ // 5b. Get the code to call from the function and check that the number of
+ // expected arguments matches what we're providing. If so, jump
+ // (tail-call) to the code in register a3 without checking arguments.
+ // a0: actual number of arguments
+ // a1: function
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a2,
+ FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ sra(a2, a2, kSmiTagSize);
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ // Check formal and actual parameter counts.
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET, ne, a2, Operand(a0));
+
+ ParameterCount expected(0);
+ __ InvokeCode(a3, expected, expected, JUMP_FUNCTION);
}
void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ const int kIndexOffset = -5 * kPointerSize;
+ const int kLimitOffset = -4 * kPointerSize;
+ const int kArgsOffset = 2 * kPointerSize;
+ const int kRecvOffset = 3 * kPointerSize;
+ const int kFunctionOffset = 4 * kPointerSize;
+
+ __ EnterInternalFrame();
+
+ __ lw(a0, MemOperand(fp, kFunctionOffset)); // Get the function.
+ __ push(a0);
+ __ lw(a0, MemOperand(fp, kArgsOffset)); // Get the args array.
+ __ push(a0);
+ // Returns (in v0) the number of arguments to copy to the stack, as a smi.
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+ // Make a2 the space we have left. The stack might already be overflowed
+ // here which will cause a2 to become negative.
+ __ subu(a2, sp, a2);
+ // Check if the arguments will overflow the stack.
+ __ sll(t0, v0, kPointerSizeLog2 - kSmiTagSize);
+ __ Branch(&okay, gt, a2, Operand(t0)); // Signed comparison.
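+ // (v0 is a smi count, so the shift above converts it to bytes; continue
+ // only if the remaining stack space is strictly larger than that.)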
+
+ // Out of stack space.
+ __ lw(a1, MemOperand(fp, kFunctionOffset));
+ __ push(a1);
+ __ push(v0);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ // End of stack check.
+
+ // Push current limit and index.
+ __ bind(&okay);
+ __ push(v0); // Limit.
+ __ mov(a1, zero_reg); // Initial index.
+ __ push(a1);
+
+ // Change context eagerly to get the right global object if necessary.
+ __ lw(a0, MemOperand(fp, kFunctionOffset));
+ __ lw(cp, FieldMemOperand(a0, JSFunction::kContextOffset));
+ // Load the shared function info while the function is still in a0.
+ __ lw(a1, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
+
+ // Compute the receiver.
+ Label call_to_object, use_global_receiver, push_receiver;
+ __ lw(a0, MemOperand(fp, kRecvOffset));
+
+ // Do not transform the receiver for strict mode functions.
+ __ lw(a2, FieldMemOperand(a1, SharedFunctionInfo::kCompilerHintsOffset));
+ __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ kSmiTagSize)));
+ __ Branch(&push_receiver, ne, t0, Operand(zero_reg));
+
+ // Do not transform the receiver for native (shared already in a1).
+ __ lw(a1, FieldMemOperand(a1, SharedFunctionInfo::kScriptOffset));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ Branch(&push_receiver, eq, a1, Operand(a2));
+ __ lw(a1, FieldMemOperand(a1, Script::kTypeOffset));
+ __ sra(a1, a1, kSmiTagSize);
+ __ Branch(&push_receiver, eq, a1, Operand(Script::TYPE_NATIVE));
+
+ // Compute the receiver in non-strict mode.
+ __ And(t0, a0, Operand(kSmiTagMask));
+ __ Branch(&call_to_object, eq, t0, Operand(zero_reg));
+ __ LoadRoot(a1, Heap::kNullValueRootIndex);
+ __ Branch(&use_global_receiver, eq, a0, Operand(a1));
+ // Heap::kUndefinedValueRootIndex is already in a2.
+ __ Branch(&use_global_receiver, eq, a0, Operand(a2));
+
+ // Check if the receiver is already a JavaScript object.
+ // a0: receiver
+ __ GetObjectType(a0, a1, a1);
+ __ Branch(&call_to_object, lt, a1, Operand(FIRST_JS_OBJECT_TYPE));
+ __ Branch(&push_receiver, le, a1, Operand(LAST_JS_OBJECT_TYPE));
+
+ // Convert the receiver to a regular object.
+ // a0: receiver
+ __ bind(&call_to_object);
+ __ push(a0);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(a0, v0); // Put object in a0 to match other paths to push_receiver.
+ __ Branch(&push_receiver);
+
+ // Use the current global receiver object as the receiver.
+ __ bind(&use_global_receiver);
+ const int kGlobalOffset =
+ Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ __ lw(a0, FieldMemOperand(cp, kGlobalOffset));
+ __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
+ __ lw(a0, FieldMemOperand(a0, kGlobalOffset));
+ __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
+
+ // Push the receiver.
+ // a0: receiver
+ __ bind(&push_receiver);
+ __ push(a0);
+
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ __ lw(a0, MemOperand(fp, kIndexOffset));
+ __ Branch(&entry);
+
+ // Load the current argument from the arguments array and push it to the
+ // stack.
+ // a0: current argument index
+ __ bind(&loop);
+ __ lw(a1, MemOperand(fp, kArgsOffset));
+ __ push(a1);
+ __ push(a0);
+
+ // Call the runtime to access the property in the arguments array.
+ __ CallRuntime(Runtime::kGetProperty, 2);
+ __ push(v0);
+
+ // Advance the smi-tagged index on the stack.
+ __ lw(a0, MemOperand(fp, kIndexOffset));
+ __ Addu(a0, a0, Operand(1 << kSmiTagSize));
+ __ sw(a0, MemOperand(fp, kIndexOffset));
+
+ // Test if the copy loop has finished copying all the elements from the
+ // arguments object.
+ __ bind(&entry);
+ __ lw(a1, MemOperand(fp, kLimitOffset));
+ __ Branch(&loop, ne, a0, Operand(a1));
+ // Invoke the function.
+ ParameterCount actual(a0);
+ __ sra(a0, a0, kSmiTagSize);
+ __ lw(a1, MemOperand(fp, kFunctionOffset));
+ __ InvokeFunction(a1, actual, CALL_FUNCTION);
+
+ // Tear down the internal frame and remove function, receiver and args.
+ __ LeaveInternalFrame();
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+}
+
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+ __ sll(a0, a0, kSmiTagSize);
+ __ li(t0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ MultiPush(a0.bit() | a1.bit() | t0.bit() | fp.bit() | ra.bit());
+ __ Addu(fp, sp, Operand(3 * kPointerSize));
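+ // fp now points at the saved caller fp; the smi-tagged argument count sits
+ // at fp - 3 * kPointerSize, where LeaveArgumentsAdaptorFrame reads it back.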
+}
+
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- v0 : result being passed through
+ // -----------------------------------
+ // Get the number of arguments passed (as a smi), tear down the frame and
+ // then tear down the parameters.
+ __ lw(a1, MemOperand(fp, -3 * kPointerSize));
+ __ mov(sp, fp);
+ __ MultiPop(fp.bit() | ra.bit());
+ __ sll(t0, a1, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(sp, sp, t0);
+ // Adjust for the receiver.
+ __ Addu(sp, sp, Operand(kPointerSize));
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // State setup as expected by MacroAssembler::InvokePrologue.
+ // ----------- S t a t e -------------
+ // -- a0: actual arguments count
+ // -- a1: function (passed through to callee)
+ // -- a2: expected arguments count
+ // -- a3: callee code entry
+ // -----------------------------------
+
+ Label invoke, dont_adapt_arguments;
+
+ Label enough, too_few;
+ __ Branch(&dont_adapt_arguments, eq,
+ a2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
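+ // (The sentinel marks callees whose code copes with any actual argument
+ // count, so no adaptor frame is needed.)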
+ // We use Uless as the number of arguments should always be greater than 0.
+ __ Branch(&too_few, Uless, a0, Operand(a2));
+
+ { // Enough parameters: actual >= expected.
+ // a0: actual number of arguments as a smi
+ // a1: function
+ // a2: expected number of arguments
+ // a3: code entry to call
+ __ bind(&enough);
+ EnterArgumentsAdaptorFrame(masm);
+
+ // Calculate copy start address into a0 and copy end address into a2.
+ __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(a0, fp, a0);
+ // Adjust for return address and receiver.
+ __ Addu(a0, a0, Operand(2 * kPointerSize));
+ // Compute copy end address.
+ __ sll(a2, a2, kPointerSizeLog2);
+ __ subu(a2, a0, a2);
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ // a0: copy start address
+ // a1: function
+ // a2: copy end address
+ // a3: code entry to call
+
+ Label copy;
+ __ bind(&copy);
+ __ lw(t0, MemOperand(a0));
+ __ push(t0);
+ __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a2));
+ __ addiu(a0, a0, -kPointerSize); // In delay slot.
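+ // (USE_DELAY_SLOT places the addiu in the branch delay slot, so a0 is
+ // decremented on every iteration whether or not the branch is taken.)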
+
+ __ jmp(&invoke);
+ }
+
+ { // Too few parameters: Actual < expected.
+ __ bind(&too_few);
+ EnterArgumentsAdaptorFrame(masm);
+
+ // TODO(MIPS): Optimize these loops.
+
+ // Calculate the copy start address into a0; the copy end address is fp.
+ // a0: actual number of arguments as a smi
+ // a1: function
+ // a2: expected number of arguments
+ // a3: code entry to call
+ __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(a0, fp, a0);
+ // Adjust for return address and receiver.
+ __ Addu(a0, a0, Operand(2 * kPointerSize));
+ // Compute copy end address. Also adjust for return address.
+ __ Addu(t1, fp, kPointerSize);
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ // a0: copy start address
+ // a1: function
+ // a2: expected number of arguments
+ // a3: code entry to call
+ // t1: copy end address
+ Label copy;
+ __ bind(&copy);
+ __ lw(t0, MemOperand(a0)); // Adjusted above for return addr and receiver.
+ __ push(t0);
+ __ Subu(a0, a0, kPointerSize);
+ __ Branch(&copy, ne, a0, Operand(t1));
+
+ // Fill the remaining expected arguments with undefined.
+ // a1: function
+ // a2: expected number of arguments
+ // a3: code entry to call
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ sll(t2, a2, kPointerSizeLog2);
+ __ Subu(a2, fp, Operand(t2));
+ __ Addu(a2, a2, Operand(-4 * kPointerSize)); // Adjust for frame.
+
+ Label fill;
+ __ bind(&fill);
+ __ push(t0);
+ __ Branch(&fill, ne, sp, Operand(a2));
+ }
+
+ // Call the entry point.
+ __ bind(&invoke);
+
+ __ Call(a3);
+
+ // Exit frame and return.
+ LeaveArgumentsAdaptorFrame(masm);
+ __ Ret();
+
+
+ // -------------------------------------------
+ // Don't adapt arguments.
+ // -------------------------------------------
+ __ bind(&dont_adapt_arguments);
+ __ Jump(a3);
}
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index f93868c..89981fd 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -40,24 +40,233 @@
#define __ ACCESS_MASM(masm)
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+ Label* slow,
+ Condition cc,
+ bool never_nan_nan);
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* rhs_not_nan,
+ Label* slow,
+ bool strict);
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+ Register lhs,
+ Register rhs);
+
+
+// Check if the operand is a heap number.
+static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
+ Register scratch1, Register scratch2,
+ Label* not_a_heap_number) {
+ __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
+ __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
+ __ Branch(not_a_heap_number, ne, scratch1, Operand(scratch2));
+}
+
void ToNumberStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // The ToNumber stub takes one argument in a0.
+ Label check_heap_number, call_builtin;
+ __ JumpIfNotSmi(a0, &check_heap_number);
+ __ mov(v0, a0);
+ __ Ret();
+
+ __ bind(&check_heap_number);
+ EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
+ __ mov(v0, a0);
+ __ Ret();
+
+ __ bind(&call_builtin);
+ __ push(a0);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
}
void FastNewClosureStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // Create a new closure from the given function info in new
+ // space. Set the context to the current context in cp.
+ Label gc;
+
+ // Pop the function info from the stack.
+ __ pop(a3);
+
+ // Attempt to allocate new JSFunction in new space.
+ __ AllocateInNewSpace(JSFunction::kSize,
+ v0,
+ a1,
+ a2,
+ &gc,
+ TAG_OBJECT);
+
+ int map_index = strict_mode_ == kStrictMode
+ ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
+ : Context::FUNCTION_MAP_INDEX;
+
+ // Compute the function map in the current global context and set that
+ // as the map of the allocated object.
+ __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
+ __ lw(a2, MemOperand(a2, Context::SlotOffset(map_index)));
+ __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+
+ // Initialize the rest of the function. We don't have to update the
+ // write barrier because the allocated object is in new space.
+ __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
+ __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset));
+ __ sw(a2, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
+ __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
+ __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset));
+ __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset));
+ __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
+
+ // Initialize the code pointer in the function to be the one
+ // found in the shared function info object.
+ __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
+ __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
+
+ // Return result. The argument function info has been popped already.
+ __ Ret();
+
+ // Create a new closure through the slower runtime call.
+ __ bind(&gc);
+ __ LoadRoot(t0, Heap::kFalseValueRootIndex);
+ __ Push(cp, a3, t0);
+ __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}
void FastNewContextStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // Try to allocate the context in new space.
+ Label gc;
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+
+ // Attempt to allocate the context in new space.
+ __ AllocateInNewSpace(FixedArray::SizeFor(length),
+ v0,
+ a1,
+ a2,
+ &gc,
+ TAG_OBJECT);
+
+ // Load the function from the stack.
+ __ lw(a3, MemOperand(sp, 0));
+
+ // Set up the object header.
+ __ LoadRoot(a2, Heap::kContextMapRootIndex);
+ __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ li(a2, Operand(Smi::FromInt(length)));
+ __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
+
+ // Set up the fixed slots.
+ __ li(a1, Operand(Smi::FromInt(0)));
+ __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ __ sw(v0, MemOperand(v0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
+
+ // Copy the global object from the surrounding context.
+ __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));
+
+ // Initialize the rest of the slots to undefined.
+ __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
+ __ sw(a1, MemOperand(v0, Context::SlotOffset(i)));
+ }
+
+ // Remove the on-stack argument and return.
+ __ mov(cp, v0);
+ __ Pop();
+ __ Ret();
+
+ // Need to collect. Call into runtime system.
+ __ bind(&gc);
+ __ TailCallRuntime(Runtime::kNewContext, 1, 1);
}
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // Stack layout on entry:
+ // [sp]: constant elements.
+ // [sp + kPointerSize]: literal index.
+ // [sp + (2 * kPointerSize)]: literals array.
+
+ // All sizes here are multiples of kPointerSize.
+ int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
+ int size = JSArray::kSize + elements_size;
+
+ // Load the boilerplate object into a3 and check if we need to create a
+ // boilerplate.
+ Label slow_case;
+ __ lw(a3, MemOperand(sp, 2 * kPointerSize));
+ __ lw(a0, MemOperand(sp, 1 * kPointerSize));
+ __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a3, t0);
+ __ lw(a3, MemOperand(t0));
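+ // a3 now holds the boilerplate, literals[literal_index]; the shift above
+ // scaled the smi index to a byte offset.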
+ __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
+ __ Branch(&slow_case, eq, a3, Operand(t1));
+
+ if (FLAG_debug_code) {
+ const char* message;
+ Heap::RootListIndex expected_map_index;
+ if (mode_ == CLONE_ELEMENTS) {
+ message = "Expected (writable) fixed array";
+ expected_map_index = Heap::kFixedArrayMapRootIndex;
+ } else {
+ ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+ message = "Expected copy-on-write fixed array";
+ expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
+ }
+ __ push(a3);
+ __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
+ __ lw(a3, FieldMemOperand(a3, HeapObject::kMapOffset));
+ __ LoadRoot(at, expected_map_index);
+ __ Assert(eq, message, a3, Operand(at));
+ __ pop(a3);
+ }
+
+ // Allocate both the JS array and the elements array in one big
+ // allocation. This avoids multiple limit checks.
+ // Return new object in v0.
+ __ AllocateInNewSpace(size,
+ v0,
+ a1,
+ a2,
+ &slow_case,
+ TAG_OBJECT);
+
+ // Copy the JS array part.
+ for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+ if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
+ __ lw(a1, FieldMemOperand(a3, i));
+ __ sw(a1, FieldMemOperand(v0, i));
+ }
+ }
+
+ if (length_ > 0) {
+ // Get hold of the elements array of the boilerplate and set up the
+ // elements pointer in the resulting object.
+ __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
+ __ Addu(a2, v0, Operand(JSArray::kSize));
+ __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
+
+ // Copy the elements array.
+ __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
+ }
+
+ // Return and remove the on-stack parameters.
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ __ bind(&slow_case);
+ __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
}
@@ -107,7 +316,62 @@
void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+#ifndef BIG_ENDIAN_FLOATING_POINT
+ Register exponent = result1_;
+ Register mantissa = result2_;
+#else
+ Register exponent = result2_;
+ Register mantissa = result1_;
+#endif
+ Label not_special;
+ // Convert from Smi to integer.
+ __ sra(source_, source_, kSmiTagSize);
+ // Move sign bit from source to destination. This works because the sign bit
+ // in the exponent word of the double has the same position and polarity as
+ // the 2's complement sign bit in a Smi.
+ STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
+ __ And(exponent, source_, Operand(HeapNumber::kSignMask));
+ // Subtract from 0 if source was negative.
+ __ subu(at, zero_reg, source_);
+ __ movn(source_, at, exponent);
+
+ // We have -1, 0 or 1, which we treat specially. Register source_ contains
+ // absolute value: it is either equal to 1 (special case of -1 and 1),
+ // greater than 1 (not a special case) or less than 1 (special case of 0).
+ __ Branch(&not_special, gt, source_, Operand(1));
+
+ // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
+ static const uint32_t exponent_word_for_1 =
+ HeapNumber::kExponentBias << HeapNumber::kExponentShift;
+ // Safe to use 'at' as dest reg here.
+ __ Or(at, exponent, Operand(exponent_word_for_1));
+ __ movn(exponent, at, source_); // Write exp when source not 0.
+ // 1, 0 and -1 all have 0 for the second word.
+ __ mov(mantissa, zero_reg);
+ __ Ret();
+
+ __ bind(&not_special);
+ // Count leading zeros.
+ // Gets the wrong answer for 0, but we already checked for that case above.
+ __ clz(zeros_, source_);
+ // Compute exponent and or it into the exponent register.
+ // We use mantissa as a scratch register here.
+ __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
+ __ subu(mantissa, mantissa, zeros_);
+ __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
+ __ Or(exponent, exponent, mantissa);
+
+ // Shift up the source, chopping the top bit off.
+ __ Addu(zeros_, zeros_, Operand(1));
+ // This wouldn't work for 1.0 or -1.0, as the shift amount would be 32,
+ // which the variable shift treats as 0 (MIPS shift amounts are mod 32).
+ __ sllv(source_, source_, zeros_);
+ // Compute lower part of fraction (last 12 bits).
+ __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
+ // And the top (top 20 bits).
+ __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
+ __ or_(exponent, exponent, source_);
+
+ __ Ret();
}
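
The stub assembles the IEEE-754 bits by hand: the operand's sign bit maps directly onto the double's sign, `clz` gives the unbiased exponent as 31 minus the leading-zero count, and the remaining bits are shifted into the two mantissa words. A standalone C++ sketch of the same algorithm, using the GCC `__builtin_clz` intrinsic in place of the MIPS `clz` instruction (the 20/12-bit word split mirrors what the shifts by kMantissaBitsInTopWord imply; this is a re-statement, not the V8 code):

\code
#include <cassert>
#include <cstdint>
#include <cstring>

// Encode an int32 as the two 32-bit words of an IEEE-754 double,
// following the stub's algorithm.
void IntToDoubleWords(int32_t value, uint32_t* exponent_word,
                      uint32_t* mantissa_word) {
  uint32_t bits = static_cast<uint32_t>(value);
  uint32_t sign = bits & 0x80000000u;
  uint32_t abs = sign ? 0u - bits : bits;  // Two's-complement negate.
  *mantissa_word = 0;
  if (abs == 0) { *exponent_word = 0; return; }                     // +0.0.
  if (abs == 1) { *exponent_word = sign | (1023u << 20); return; }  // +/-1.
  int zeros = __builtin_clz(abs);           // The MIPS clz instruction.
  uint32_t exponent = (31 - zeros) + 1023;  // Biased exponent.
  abs <<= (zeros + 1);                      // Drop the implicit leading 1.
  *exponent_word = sign | (exponent << 20) | (abs >> 12);  // Top 20 bits.
  *mantissa_word = abs << 20;                              // Low bits.
}

int main() {
  for (int32_t v : {-2147483647 - 1, -3, -1, 0, 1, 2, 3, 1 << 30}) {
    uint32_t hi, lo;
    IntToDoubleWords(v, &hi, &lo);
    uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
    double d;
    std::memcpy(&d, &bits, sizeof d);
    assert(d == static_cast<double>(v));  // Matches the compiler's encoding.
  }
  return 0;
}
\endcode
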
@@ -115,7 +379,34 @@
FloatingPointHelper::Destination destination,
Register scratch1,
Register scratch2) {
- UNIMPLEMENTED_MIPS();
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ sra(scratch1, a0, kSmiTagSize);
+ __ mtc1(scratch1, f14);
+ __ cvt_d_w(f14, f14);
+ __ sra(scratch1, a1, kSmiTagSize);
+ __ mtc1(scratch1, f12);
+ __ cvt_d_w(f12, f12);
+ if (destination == kCoreRegisters) {
+ __ mfc1(a2, f14);
+ __ mfc1(a3, f15);
+
+ __ mfc1(a0, f12);
+ __ mfc1(a1, f13);
+ }
+ } else {
+ ASSERT(destination == kCoreRegisters);
+ // Write Smi from a0 to a3 and a2 in double format.
+ __ mov(scratch1, a0);
+ ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
+ __ push(ra);
+ __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ // Write Smi from a1 to a1 and a0 in double format.
+ __ mov(scratch1, a1);
+ ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
+ __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(ra);
+ }
}
@@ -126,7 +417,14 @@
Register scratch1,
Register scratch2,
Label* slow) {
- UNIMPLEMENTED_MIPS();
+
+ // Load right operand (a0) to f14 or a2/a3.
+ LoadNumber(masm, destination,
+ a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow);
+
+ // Load left operand (a1) to f12 or a0/a1.
+ LoadNumber(masm, destination,
+ a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow);
}
@@ -140,7 +438,60 @@
Register scratch1,
Register scratch2,
Label* not_number) {
- UNIMPLEMENTED_MIPS();
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+
+ Label is_smi, done;
+
+ __ JumpIfSmi(object, &is_smi);
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
+
+ // Handle loading a double from a heap number.
+ if (CpuFeatures::IsSupported(FPU) &&
+ destination == kFPURegisters) {
+ CpuFeatures::Scope scope(FPU);
+ // Load the double from tagged HeapNumber to double register.
+
+ // ARM uses a workaround here because of the unaligned HeapNumber
+ // kValueOffset. On MIPS this workaround is built into ldc1 so there's no
+ // point in generating even more instructions.
+ __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
+ } else {
+ ASSERT(destination == kCoreRegisters);
+ // Load the double from heap number to dst1 and dst2 in double format.
+ __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset));
+ __ lw(dst2, FieldMemOperand(object,
+ HeapNumber::kValueOffset + kPointerSize));
+ }
+ __ Branch(&done);
+
+ // Handle loading a double from a smi.
+ __ bind(&is_smi);
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // Convert smi to double using FPU instructions.
+ __ SmiUntag(scratch1, object);
+ __ mtc1(scratch1, dst);
+ __ cvt_d_w(dst, dst);
+ if (destination == kCoreRegisters) {
+ // Load the converted smi to dst1 and dst2 in double format.
+ __ mfc1(dst1, dst);
+ __ mfc1(dst2, FPURegister::from_code(dst.code() + 1));
+ }
+ } else {
+ ASSERT(destination == kCoreRegisters);
+ // Write the smi to dst1 and dst2 in double format.
+ __ mov(scratch1, object);
+ ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
+ __ push(ra);
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(ra);
+ }
+
+ __ bind(&done);
}
@@ -153,7 +504,40 @@
Register scratch3,
FPURegister double_scratch,
Label* not_number) {
- UNIMPLEMENTED_MIPS();
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+ Label is_smi;
+ Label done;
+ Label not_in_int32_range;
+
+ __ JumpIfSmi(object, &is_smi);
+ __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
+ __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
+ __ ConvertToInt32(object,
+ dst,
+ scratch1,
+ scratch2,
+ double_scratch,
+ &not_in_int32_range);
+ __ jmp(&done);
+
+ __ bind(&not_in_int32_range);
+ __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+
+ __ EmitOutOfInt32RangeTruncate(dst,
+ scratch1,
+ scratch2,
+ scratch3);
+
+ __ jmp(&done);
+
+ __ bind(&is_smi);
+ __ SmiUntag(dst, object);
+ __ bind(&done);
}
@@ -165,7 +549,76 @@
Register dst2,
Register scratch2,
FPURegister single_scratch) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(!int_scratch.is(scratch2));
+
+ Label done;
+
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ mtc1(int_scratch, single_scratch);
+ __ cvt_d_w(double_dst, single_scratch);
+ if (destination == kCoreRegisters) {
+ __ mfc1(dst1, double_dst);
+ __ mfc1(dst2, FPURegister::from_code(double_dst.code() + 1));
+ }
+ } else {
+ Label fewer_than_20_useful_bits;
+ // Expected output:
+ // | dst2 | dst1 |
+ // | s | exp | mantissa |
+
+ // Check for zero.
+ __ mov(dst2, int_scratch);
+ __ mov(dst1, int_scratch);
+ __ Branch(&done, eq, int_scratch, Operand(zero_reg));
+
+ // Preload the sign of the value.
+ __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask));
+ // Get the absolute value of the object (as an unsigned integer).
+ Label skip_sub;
+ __ Branch(&skip_sub, ge, dst2, Operand(zero_reg));
+ __ Subu(int_scratch, zero_reg, int_scratch);
+ __ bind(&skip_sub);
+
+ // Get mantissa[51:20].
+
+ // Get the position of the first set bit.
+ __ clz(dst1, int_scratch);
+ __ li(scratch2, 31);
+ __ Subu(dst1, scratch2, dst1);
+
+ // Set the exponent.
+ __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias));
+ __ Ins(dst2, scratch2,
+ HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+
+ // Clear the leading set bit (it becomes the double's implicit 1).
+ __ li(scratch2, Operand(1));
+ __ sllv(scratch2, scratch2, dst1);
+ __ li(at, -1);
+ __ Xor(scratch2, scratch2, at);
+ __ And(int_scratch, int_scratch, scratch2);
+
+ // Get the number of bits to set in the lower part of the mantissa.
+ __ Subu(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
+ __ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg));
+ // Set the higher 20 bits of the mantissa.
+ __ srlv(at, int_scratch, scratch2);
+ __ or_(dst2, dst2, at);
+ __ li(at, 32);
+ __ subu(scratch2, at, scratch2);
+ __ sllv(dst1, int_scratch, scratch2);
+ __ Branch(&done);
+
+ __ bind(&fewer_than_20_useful_bits);
+ __ li(at, HeapNumber::kMantissaBitsInTopWord);
+ __ subu(scratch2, at, dst1);
+ __ sllv(scratch2, int_scratch, scratch2);
+ __ Or(dst2, dst2, scratch2);
+ // Set dst1 to 0.
+ __ mov(dst1, zero_reg);
+ }
+ __ bind(&done);
}
@@ -180,7 +633,81 @@
Register scratch2,
FPURegister single_scratch,
Label* not_int32) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(!scratch1.is(object) && !scratch2.is(object));
+ ASSERT(!scratch1.is(scratch2));
+ ASSERT(!heap_number_map.is(object) &&
+ !heap_number_map.is(scratch1) &&
+ !heap_number_map.is(scratch2));
+
+ Label done, obj_is_not_smi;
+
+ __ JumpIfNotSmi(object, &obj_is_not_smi);
+ __ SmiUntag(scratch1, object);
+ ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
+ scratch2, single_scratch);
+ __ Branch(&done);
+
+ __ bind(&obj_is_not_smi);
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+
+ // Load the number.
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // Load the double value.
+ __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+ // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
+ // On MIPS a lot of things cannot be implemented the same way so right
+ // now it makes a lot more sense to just do things manually.
+
+ // Save FCSR.
+ __ cfc1(scratch1, FCSR);
+ // Disable FPU exceptions.
+ __ ctc1(zero_reg, FCSR);
+ __ trunc_w_d(single_scratch, double_dst);
+ // Retrieve FCSR.
+ __ cfc1(scratch2, FCSR);
+ // Restore FCSR.
+ __ ctc1(scratch1, FCSR);
+
+ // Check for inexact conversion.
+ __ srl(scratch2, scratch2, kFCSRFlagShift);
+ __ And(scratch2, scratch2, (kFCSRFlagMask | kFCSRInexactFlagBit));
+
+ // Jump to not_int32 if the operation did not succeed.
+ __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
+
+ if (destination == kCoreRegisters) {
+ __ mfc1(dst1, double_dst);
+ __ mfc1(dst2, FPURegister::from_code(double_dst.code() + 1));
+ }
+
+ } else {
+ ASSERT(!scratch1.is(object) && !scratch2.is(object));
+ // Load the double value in the destination registers.
+ __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+
+ // Check for 0 and -0.
+ __ And(scratch1, dst1, Operand(~HeapNumber::kSignMask));
+ __ Or(scratch1, scratch1, Operand(dst2));
+ __ Branch(&done, eq, scratch1, Operand(zero_reg));
+
+ // Check that the value can be exactly represented by a 32-bit integer.
+ // Jump to not_int32 if that's not the case.
+ DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
+
+ // dst1 and dst2 were trashed. Reload the double value.
+ __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+ }
+
+ __ bind(&done);
}
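
The FCSR sequence above (save, zero out, truncate, re-read the flags, restore) is how the stub detects an inexact conversion without taking an FPU exception. A host-side analogue using the C99 floating-point environment, offered as a sketch only (portable code would also need `#pragma STDC FENV_ACCESS ON` where supported; the MIPS stub additionally checks the invalid-operation flags via kFCSRFlagMask):

\code
#include <cfenv>
#include <cmath>
#include <cstdint>

// Truncate toward zero and report whether the conversion was exact.
bool TruncateDoubleToInt32Exact(double d, int32_t* out) {
  int old_mode = std::fegetround();
  std::fesetround(FE_TOWARDZERO);
  std::feclearexcept(FE_INEXACT);
  double t = std::rint(d);  // Raises FE_INEXACT when t != d.
  bool exact = std::fetestexcept(FE_INEXACT) == 0;
  std::fesetround(old_mode);
  if (!exact || std::isnan(t) ||
      t < -2147483648.0 || t > 2147483647.0) {
    return false;  // Inexact or out of the int32 range: take the slow path.
  }
  *out = static_cast<int32_t>(t);
  return true;
}
\endcode
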
@@ -193,7 +720,89 @@
Register scratch3,
FPURegister double_scratch,
Label* not_int32) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(!dst.is(object));
+ ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
+ ASSERT(!scratch1.is(scratch2) &&
+ !scratch1.is(scratch3) &&
+ !scratch2.is(scratch3));
+
+ Label done;
+
+ // Untag the object into the destination register.
+ __ SmiUntag(dst, object);
+ // Just return if the object is a smi.
+ __ JumpIfSmi(object, &done);
+
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+
+ // Object is a heap number.
+ // Convert the floating point value to a 32-bit integer.
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // Load the double value.
+ __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+ // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
+ // On MIPS a lot of things cannot be implemented the same way so right
+ // now it makes a lot more sense to just do things manually.
+
+ // Save FCSR.
+ __ cfc1(scratch1, FCSR);
+ // Disable FPU exceptions.
+ __ ctc1(zero_reg, FCSR);
+ __ trunc_w_d(double_scratch, double_scratch);
+ // Retrieve FCSR.
+ __ cfc1(scratch2, FCSR);
+ // Restore FCSR.
+ __ ctc1(scratch1, FCSR);
+
+ // Check for inexact conversion.
+ __ srl(scratch2, scratch2, kFCSRFlagShift);
+ __ And(scratch2, scratch2, (kFCSRFlagMask | kFCSRInexactFlagBit));
+
+ // Jump to not_int32 if the operation did not succeed.
+ __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
+ // Get the result in the destination register.
+ __ mfc1(dst, double_scratch);
+
+ } else {
+ // Load the double value in the destination registers.
+ __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+
+ // Check for 0 and -0.
+ __ And(dst, scratch1, Operand(~HeapNumber::kSignMask));
+ __ Or(dst, scratch2, Operand(dst));
+ __ Branch(&done, eq, dst, Operand(zero_reg));
+
+ DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
+
+ // Register state after DoubleIs32BitInteger:
+ // dst: mantissa[51:20].
+ // scratch2: 1
+
+ // Shift back the higher bits of the mantissa.
+ __ srlv(dst, dst, scratch3);
+ // Set the implicit first bit.
+ __ li(at, 32);
+ __ subu(scratch3, at, scratch3);
+ __ sllv(scratch2, scratch2, scratch3);
+ __ Or(dst, dst, scratch2);
+ // Set the sign.
+ __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+ Label skip_sub;
+ __ Branch(&skip_sub, ge, scratch1, Operand(zero_reg));
+ __ Subu(dst, zero_reg, dst);
+ __ bind(&skip_sub);
+ }
+
+ __ bind(&done);
}
@@ -203,7 +812,57 @@
Register dst,
Register scratch,
Label* not_int32) {
- UNIMPLEMENTED_MIPS();
+ // Get exponent alone in scratch.
+ __ Ext(scratch,
+ src1,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+
+ // Subtract the bias from the exponent.
+ __ Subu(scratch, scratch, Operand(HeapNumber::kExponentBias));
+
+ // src1: higher (exponent) part of the double value.
+ // src2: lower (mantissa) part of the double value.
+ // scratch: unbiased exponent.
+
+ // Fast cases. Check for values that are obviously not 32-bit integers.
+ // A negative exponent cannot yield a 32-bit integer.
+ __ Branch(not_int32, lt, scratch, Operand(zero_reg));
+ // Exponent greater than 31 cannot yield 32-bit integers.
+ // Also, a positive value with an exponent equal to 31 is outside of the
+ // signed 32-bit integer range.
+ // Another way to put it is that if (exponent - signbit) > 30 then the
+ // number cannot be represented as an int32.
+ Register tmp = dst;
+ __ srl(at, src1, 31);
+ __ subu(tmp, scratch, at);
+ __ Branch(not_int32, gt, tmp, Operand(30));
+ // With the exponent at most 30, mantissa bits [21:0] are fraction digits;
+ // if any of them is set, the value cannot be an int32.
+ __ And(tmp, src2, 0x3fffff);
+ __ Branch(not_int32, ne, tmp, Operand(zero_reg));
+
+ // Otherwise the exponent must be big enough to shift all the non-zero
+ // bits to the left of the binary point, i.e. the (30 - exponent) lowest
+ // bits of the 31 upper bits of the mantissa must be null.
+ // Because bits [21:0] are null, we can equivalently check that the
+ // (32 - exponent) lowest bits of the 32 upper bits of the mantissa are null.
+
+ // Get the 32 higher bits of the mantissa in dst.
+ __ Ext(dst,
+ src2,
+ HeapNumber::kMantissaBitsInTopWord,
+ 32 - HeapNumber::kMantissaBitsInTopWord);
+ __ sll(at, src1, HeapNumber::kNonMantissaBitsInTopWord);
+ __ or_(dst, dst, at);
+
+ // Create the mask and test the lower bits (of the higher bits).
+ __ li(at, 32);
+ __ subu(scratch, at, scratch);
+ __ li(src2, 1);
+ __ sllv(src1, src2, scratch);
+ __ Subu(src1, src1, Operand(1));
+ __ And(src1, dst, src1);
+ __ Branch(not_int32, ne, src1, Operand(zero_reg));
}
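
Restated on the host, the test above rejects a double whose unbiased exponent is negative or too large for the int32 range, or whose mantissa still has fraction bits set below the binary point. The following is a check in the same spirit, written in portable C++; it reconstructs the integer explicitly instead of replaying the stub's exact mask sequence, so it is a behavioral sketch rather than a transcription:

\code
#include <cstdint>
#include <cstring>

// True iff 'd' is exactly representable as a signed 32-bit integer.
bool DoubleIs32BitInteger(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  if ((bits << 1) == 0) return true;  // +0.0 and -0.0 (callers check these).
  bool negative = (bits >> 63) != 0;
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
  uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);
  if (exponent < 0 || exponent > 31) return false;  // |d| < 1, or too big.
  // The low (52 - exponent) mantissa bits are fraction digits.
  if ((mantissa & ((uint64_t{1} << (52 - exponent)) - 1)) != 0) return false;
  // Reconstruct |d| (implicit leading 1 plus mantissa) and range-check it.
  uint64_t magnitude = ((uint64_t{1} << 52) | mantissa) >> (52 - exponent);
  return negative ? magnitude <= 0x80000000u : magnitude <= 0x7FFFFFFFu;
}
\endcode
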
@@ -212,18 +871,567 @@
Token::Value op,
Register heap_number_result,
Register scratch) {
- UNIMPLEMENTED_MIPS();
+ // Using core registers:
+ // a0: Left value (least significant part of mantissa).
+ // a1: Left value (sign, exponent, top of mantissa).
+ // a2: Right value (least significant part of mantissa).
+ // a3: Right value (sign, exponent, top of mantissa).
+
+ // Assert that heap_number_result is saved.
+ // We currently always use s0 to pass it.
+ ASSERT(heap_number_result.is(s0));
+
+ // Push the current return address before the C call.
+ __ push(ra);
+ __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
+ if (!IsMipsSoftFloatABI) {
+ CpuFeatures::Scope scope(FPU);
+ // We are not using MIPS FPU instructions, and the parameters for the
+ // runtime call are prepared in the a0-a3 registers, but the function we
+ // are calling is compiled with the hard-float flag and expects the
+ // hard-float ABI (parameters in the f12/f14 registers). Copy the
+ // parameters from a0-a3 to the f12/f14 register pairs.
+ __ mtc1(a0, f12);
+ __ mtc1(a1, f13);
+ __ mtc1(a2, f14);
+ __ mtc1(a3, f15);
+ }
+ // Call C routine that may not cause GC or other trouble.
+ __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
+ 4);
+ // Store answer in the overwritable heap number.
+ if (!IsMipsSoftFloatABI) {
+ CpuFeatures::Scope scope(FPU);
+ // Double returned in register f0.
+ __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
+ } else {
+ // Double returned in registers v0 and v1.
+ __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
+ __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
+ }
+ // Place heap_number_result in v0 and return to the pushed return address.
+ __ mov(v0, heap_number_result);
+ __ pop(ra);
+ __ Ret();
}
// See comment for class, this does NOT work for int32's that are in Smi range.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Label max_negative_int;
+ // the_int_ has the answer which is a signed int32 but not a Smi.
+ // We test for the special value that has a different exponent.
+ STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
+ // Test sign, and save for later conditionals.
+ __ And(sign_, the_int_, Operand(0x80000000u));
+ __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));
+
+ // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
+ // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
+ uint32_t non_smi_exponent =
+ (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+ __ li(scratch_, Operand(non_smi_exponent));
+ // Set the sign bit in scratch_ if the value was negative.
+ __ or_(scratch_, scratch_, sign_);
+ // Subtract from 0 if the value was negative.
+ __ subu(at, zero_reg, the_int_);
+ __ movn(the_int_, at, sign_);
+ // We should be masking the implicit first digit of the mantissa away here,
+ // but it just ends up combining harmlessly with the last digit of the
+ // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
+ // the most significant 1 to hit the last bit of the 12-bit sign and exponent.
+ ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+ __ srl(at, the_int_, shift_distance);
+ __ or_(scratch_, scratch_, at);
+ __ sw(scratch_, FieldMemOperand(the_heap_number_,
+ HeapNumber::kExponentOffset));
+ __ sll(scratch_, the_int_, 32 - shift_distance);
+ __ sw(scratch_, FieldMemOperand(the_heap_number_,
+ HeapNumber::kMantissaOffset));
+ __ Ret();
+
+ __ bind(&max_negative_int);
+ // The max negative int32 is stored as a positive number in the mantissa of
+ // a double because it uses a sign bit instead of using two's complement.
+ // The actual mantissa bits stored are all 0 because the implicit most
+ // significant 1 bit is not stored.
+ non_smi_exponent += 1 << HeapNumber::kExponentShift;
+ __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
+ __ sw(scratch_,
+ FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
+ __ mov(scratch_, zero_reg);
+ __ sw(scratch_,
+ FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
+ __ Ret();
+}
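
The max_negative_int path is worth one concrete check: kMinInt is the one int32 whose magnitude, 2^31, needs a biased exponent of 31 + 1023 = 1054 and an all-zero stored mantissa (the leading 1 is implicit). A host-side assertion of the expected word values (the constant is computed here from first principles, not taken from V8):

\code
#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  // Exponent word: sign | (1054 << 20) == 0x80000000 | 0x41E00000.
  double d = -2147483648.0;  // kMinInt as a double.
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  assert(static_cast<uint32_t>(bits >> 32) == 0xC1E00000u);  // Exponent word.
  assert(static_cast<uint32_t>(bits) == 0u);                 // Mantissa word.
  return 0;
}
\endcode
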
+
+
+// Handle the case where the lhs and rhs are the same object.
+// Equality is almost reflexive (everything but NaN), so this is a test
+// for "identity and not NaN".
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+ Label* slow,
+ Condition cc,
+ bool never_nan_nan) {
+ Label not_identical;
+ Label heap_number, return_equal;
+ Register exp_mask_reg = t5;
+
+ __ Branch(&not_identical, ne, a0, Operand(a1));
+
+ // The two objects are identical. If we know that one of them isn't NaN then
+ // we now know they test equal.
+ if (cc != eq || !never_nan_nan) {
+ __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
+
+ // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
+ // so we do the second-best thing: test it ourselves.
+ // The operands are equal and not both Smis, so neither of them is a Smi.
+ // If the object is not a heap number, then return equal.
+ if (cc == less || cc == greater) {
+ __ GetObjectType(a0, t4, t4);
+ __ Branch(slow, greater, t4, Operand(FIRST_JS_OBJECT_TYPE));
+ } else {
+ __ GetObjectType(a0, t4, t4);
+ __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
+ // Comparing JS objects with <=, >= is complicated.
+ if (cc != eq) {
+ __ Branch(slow, greater, t4, Operand(FIRST_JS_OBJECT_TYPE));
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if (cc == less_equal || cc == greater_equal) {
+ __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
+ __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
+ __ Branch(&return_equal, ne, a0, Operand(t2));
+ if (cc == le) {
+ // undefined <= undefined should fail.
+ __ li(v0, Operand(GREATER));
+ } else {
+ // undefined >= undefined should fail.
+ __ li(v0, Operand(LESS));
+ }
+ __ Ret();
+ }
+ }
+ }
+ }
+
+ __ bind(&return_equal);
+ if (cc == less) {
+ __ li(v0, Operand(GREATER)); // Things aren't less than themselves.
+ } else if (cc == greater) {
+ __ li(v0, Operand(LESS)); // Things aren't greater than themselves.
+ } else {
+ __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves.
+ }
+ __ Ret();
+
+ if (cc != eq || !never_nan_nan) {
+ // For less and greater we don't have to check for NaN since the result of
+ // x < x is false regardless. For the others, here is some code to check
+ // for NaN.
+ if (cc != lt && cc != gt) {
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if it's
+ // not NaN.
+
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // Read top bits of double representation (second word of value).
+ __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+ // Test that exponent bits are all set.
+ __ And(t3, t2, Operand(exp_mask_reg));
+ // If all bits not set (ne cond), then not a NaN, objects are equal.
+ __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
+
+ // Shift out flag and all exponent bits, retaining only mantissa.
+ __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
+ // Or with all low-bits of mantissa.
+ __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
+ __ Or(v0, t3, Operand(t2));
+ // For equal we already have the right value in v0: Return zero (equal)
+ // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+ // not (it's a NaN). For <= and >= we need to load v0 with the failing
+ // value if it's a NaN.
+ if (cc != eq) {
+ // All-zero means Infinity means equal.
+ __ Ret(eq, v0, Operand(zero_reg));
+ if (cc == le) {
+ __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
+ } else {
+ __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
+ }
+ }
+ __ Ret();
+ }
+ // No fall through here.
+ }
+
+ __ bind(&not_identical);
+}
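
The NaN test above works purely on the heap number's two words: a double is NaN exactly when all eleven exponent bits are set and the 52-bit mantissa is non-zero (an all-zero mantissa would be +/-Infinity). A minimal host-side sketch of the same check (the 0x7FF00000 mask is restated here, mirroring what HeapNumber::kExponentMask must be):

\code
#include <cstdint>
#include <cstring>

// Mirrors the generated sequence: mask the exponent, then OR the shifted
// top mantissa bits with the low mantissa word.
bool IsNaNBits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);  // Sign/exponent word.
  uint32_t lo = static_cast<uint32_t>(bits);        // Low mantissa word.
  const uint32_t kExponentMask = 0x7FF00000u;
  if ((hi & kExponentMask) != kExponentMask) return false;  // Finite.
  // Shift out the 12 non-mantissa bits; any remaining bit means NaN.
  return ((hi << 12) | lo) != 0;
}
\endcode
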
+
+
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* both_loaded_as_doubles,
+ Label* slow,
+ bool strict) {
+ ASSERT((lhs.is(a0) && rhs.is(a1)) ||
+ (lhs.is(a1) && rhs.is(a0)));
+
+ Label lhs_is_smi;
+ __ And(t0, lhs, Operand(kSmiTagMask));
+ __ Branch(&lhs_is_smi, eq, t0, Operand(zero_reg));
+ // Rhs is a Smi.
+ // Check whether the non-smi is a heap number.
+ __ GetObjectType(lhs, t4, t4);
+ if (strict) {
+ // If lhs was not a number and rhs was a Smi then strict equality cannot
+ // succeed. Return non-equal (lhs is already not zero).
+ __ mov(v0, lhs);
+ __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
+ } else {
+ // Smi compared non-strictly with a non-Smi non-heap-number. Call
+ // the runtime.
+ __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
+ }
+
+ // Rhs is a smi, lhs is a number.
+ // Convert smi rhs to double.
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ sra(at, rhs, kSmiTagSize);
+ __ mtc1(at, f14);
+ __ cvt_d_w(f14, f14);
+ __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ } else {
+ // Load lhs to a double in a2, a3.
+ __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
+ __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+
+ // Write Smi from rhs to a1 and a0 in double format. t5 is scratch.
+ __ mov(t6, rhs);
+ ConvertToDoubleStub stub1(a1, a0, t6, t5);
+ __ push(ra);
+ __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+
+ __ pop(ra);
+ }
+
+ // We now have both loaded as doubles.
+ __ jmp(both_loaded_as_doubles);
+
+ __ bind(&lhs_is_smi);
+ // Lhs is a Smi. Check whether the non-smi is a heap number.
+ __ GetObjectType(rhs, t4, t4);
+ if (strict) {
+ // If lhs was not a number and rhs was a Smi then strict equality cannot
+ // succeed. Return non-equal.
+ __ li(v0, Operand(1));
+ __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
+ } else {
+ // Smi compared non-strictly with a non-Smi non-heap-number. Call
+ // the runtime.
+ __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
+ }
+
+ // Lhs is a smi, rhs is a number.
+ // Convert smi lhs to double.
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ sra(at, lhs, kSmiTagSize);
+ __ mtc1(at, f12);
+ __ cvt_d_w(f12, f12);
+ __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ } else {
+ // Convert lhs to a double format. t5 is scratch.
+ __ mov(t6, lhs);
+ ConvertToDoubleStub stub2(a3, a2, t6, t5);
+ __ push(ra);
+ __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(ra);
+ // Load rhs to a double in a1, a0.
+ if (rhs.is(a0)) {
+ __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
+ __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ } else {
+ __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
+ }
+ }
+ // Fall through to both_loaded_as_doubles.
}
void EmitNanCheck(MacroAssembler* masm, Condition cc) {
- UNIMPLEMENTED_MIPS();
+ bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // Lhs and rhs are already loaded to f12 and f14 register pairs.
+ __ mfc1(t0, f14); // f14 has LS 32 bits of rhs.
+ __ mfc1(t1, f15); // f15 has MS 32 bits of rhs.
+ __ mfc1(t2, f12); // f12 has LS 32 bits of lhs.
+ __ mfc1(t3, f13); // f13 has MS 32 bits of lhs.
+ } else {
+ // Lhs and rhs are already loaded to GP registers.
+ __ mov(t0, a0); // a0 has LS 32 bits of rhs.
+ __ mov(t1, a1); // a1 has MS 32 bits of rhs.
+ __ mov(t2, a2); // a2 has LS 32 bits of lhs.
+ __ mov(t3, a3); // a3 has MS 32 bits of lhs.
+ }
+ Register rhs_exponent = exp_first ? t0 : t1;
+ Register lhs_exponent = exp_first ? t2 : t3;
+ Register rhs_mantissa = exp_first ? t1 : t0;
+ Register lhs_mantissa = exp_first ? t3 : t2;
+ Label one_is_nan, neither_is_nan;
+ Label lhs_not_nan_exp_mask_is_loaded;
+
+ Register exp_mask_reg = t4;
+ __ li(exp_mask_reg, HeapNumber::kExponentMask);
+ __ and_(t5, lhs_exponent, exp_mask_reg);
+ __ Branch(&lhs_not_nan_exp_mask_is_loaded, ne, t5, Operand(exp_mask_reg));
+
+ __ sll(t5, lhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
+ __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
+
+ __ Branch(&one_is_nan, ne, lhs_mantissa, Operand(zero_reg));
+
+ __ li(exp_mask_reg, HeapNumber::kExponentMask);
+ __ bind(&lhs_not_nan_exp_mask_is_loaded);
+ __ and_(t5, rhs_exponent, exp_mask_reg);
+
+ __ Branch(&neither_is_nan, ne, t5, Operand(exp_mask_reg));
+
+ __ sll(t5, rhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
+ __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
+
+ __ Branch(&neither_is_nan, eq, rhs_mantissa, Operand(zero_reg));
+
+ __ bind(&one_is_nan);
+ // NaN comparisons always fail.
+ // Load whatever we need in v0 to make the comparison fail.
+ if (cc == lt || cc == le) {
+ __ li(v0, Operand(GREATER));
+ } else {
+ __ li(v0, Operand(LESS));
+ }
+ __ Ret(); // Return.
+
+ __ bind(&neither_is_nan);
+}
+
+
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
+ // f12 and f14 have the two doubles. Neither is a NaN.
+ // Call a native function to do a comparison between two non-NaNs.
+ // Call C routine that may not cause GC or other trouble.
+ // We make the call and return manually, because the argument slots need
+ // to be freed.
+
+ Label return_result_not_equal, return_result_equal;
+ if (cc == eq) {
+ // Doubles are not equal unless they have the same bit pattern.
+ // Exception: 0 and -0.
+ bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // Lhs and rhs are already loaded to f12 and f14 register pairs.
+ __ mfc1(t0, f14); // f14 has LS 32 bits of rhs.
+ __ mfc1(t1, f15); // f15 has MS 32 bits of rhs.
+ __ mfc1(t2, f12); // f12 has LS 32 bits of lhs.
+ __ mfc1(t3, f13); // f13 has MS 32 bits of lhs.
+ } else {
+ // Lhs and rhs are already loaded to GP registers.
+ __ mov(t0, a0); // a0 has LS 32 bits of rhs.
+ __ mov(t1, a1); // a1 has MS 32 bits of rhs.
+ __ mov(t2, a2); // a2 has LS 32 bits of lhs.
+ __ mov(t3, a3); // a3 has MS 32 bits of lhs.
+ }
+ Register rhs_exponent = exp_first ? t0 : t1;
+ Register lhs_exponent = exp_first ? t2 : t3;
+ Register rhs_mantissa = exp_first ? t1 : t0;
+ Register lhs_mantissa = exp_first ? t3 : t2;
+
+ __ xor_(v0, rhs_mantissa, lhs_mantissa);
+ __ Branch(&return_result_not_equal, ne, v0, Operand(zero_reg));
+
+ __ subu(v0, rhs_exponent, lhs_exponent);
+ __ Branch(&return_result_equal, eq, v0, Operand(zero_reg));
+ // 0, -0 case.
+ __ sll(rhs_exponent, rhs_exponent, kSmiTagSize);
+ __ sll(lhs_exponent, lhs_exponent, kSmiTagSize);
+ __ or_(t4, rhs_exponent, lhs_exponent);
+ __ or_(t4, t4, rhs_mantissa);
+
+ __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));
+
+ __ bind(&return_result_equal);
+ __ li(v0, Operand(EQUAL));
+ __ Ret();
+ }
+
+ __ bind(&return_result_not_equal);
+
+ if (!CpuFeatures::IsSupported(FPU)) {
+ __ push(ra);
+ __ PrepareCallCFunction(4, t4); // Two doubles count as 4 arguments.
+ if (!IsMipsSoftFloatABI) {
+ // We are not using MIPS FPU instructions, and the parameters for the
+ // runtime call are prepared in the a0-a3 registers, but the function we
+ // are calling is compiled with the hard-float flag and expects the
+ // hard-float ABI (parameters in the f12/f14 registers). Copy the
+ // parameters from a0-a3 to the f12/f14 register pairs.
+ __ mtc1(a0, f12);
+ __ mtc1(a1, f13);
+ __ mtc1(a2, f14);
+ __ mtc1(a3, f15);
+ }
+ __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4);
+ __ pop(ra); // Because this function returns int, result is in v0.
+ __ Ret();
+ } else {
+ CpuFeatures::Scope scope(FPU);
+ Label equal, less_than;
+ __ c(EQ, D, f12, f14);
+ __ bc1t(&equal);
+ __ nop();
+
+ __ c(OLT, D, f12, f14);
+ __ bc1t(&less_than);
+ __ nop();
+
+ // Not equal, not less, not NaN, must be greater.
+ __ li(v0, Operand(GREATER));
+ __ Ret();
+
+ __ bind(&equal);
+ __ li(v0, Operand(EQUAL));
+ __ Ret();
+
+ __ bind(&less_than);
+ __ li(v0, Operand(LESS));
+ __ Ret();
+ }
+}
+
+
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+ Register lhs,
+ Register rhs) {
+ // If either operand is a JSObject or an oddball value, then they are
+ // not equal since their pointers are different.
+ // There is no test for undetectability in strict equality.
+ STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ Label first_non_object;
+ // Get the type of the first operand into a2 and compare it with
+ // FIRST_JS_OBJECT_TYPE.
+ __ GetObjectType(lhs, a2, a2);
+ __ Branch(&first_non_object, less, a2, Operand(FIRST_JS_OBJECT_TYPE));
+
+ // Return non-zero.
+ Label return_not_equal;
+ __ bind(&return_not_equal);
+ __ li(v0, Operand(1));
+ __ Ret();
+
+ __ bind(&first_non_object);
+ // Check for oddballs: true, false, null, undefined.
+ __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
+
+ __ GetObjectType(rhs, a3, a3);
+ __ Branch(&return_not_equal, greater, a3, Operand(FIRST_JS_OBJECT_TYPE));
+
+ // Check for oddballs: true, false, null, undefined.
+ __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
+
+ // Now that we have the types we might as well check for symbol-symbol.
+ // Ensure that no non-strings have the symbol bit set.
+ STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ And(t2, a2, Operand(a3));
+ __ And(t0, t2, Operand(kIsSymbolMask));
+ __ Branch(&return_not_equal, ne, t0, Operand(zero_reg));
+}
+
+
+static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* both_loaded_as_doubles,
+ Label* not_heap_numbers,
+ Label* slow) {
+ __ GetObjectType(lhs, a3, a2);
+ __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
+ __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ // If first was a heap number & second wasn't, go to slow case.
+ __ Branch(slow, ne, a3, Operand(a2));
+
+ // Both are heap numbers. Load them up then jump to the code we have
+ // for that.
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ } else {
+ __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
+ if (rhs.is(a0)) {
+ __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
+ __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ } else {
+ __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
+ }
+ }
+ __ jmp(both_loaded_as_doubles);
+}
+
+
+// Fast negative check for symbol-to-symbol equality.
+static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* possible_strings,
+ Label* not_both_strings) {
+ ASSERT((lhs.is(a0) && rhs.is(a1)) ||
+ (lhs.is(a1) && rhs.is(a0)));
+
+ // a2 is object type of lhs.
+ // Ensure that no non-strings have the symbol bit set.
+ Label object_test;
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ And(at, a2, Operand(kIsNotStringMask));
+ __ Branch(&object_test, ne, at, Operand(zero_reg));
+ __ And(at, a2, Operand(kIsSymbolMask));
+ __ Branch(possible_strings, eq, at, Operand(zero_reg));
+ __ GetObjectType(rhs, a3, a3);
+ __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
+ __ And(at, a3, Operand(kIsSymbolMask));
+ __ Branch(possible_strings, eq, at, Operand(zero_reg));
+
+ // Both are symbols. We already checked they weren't the same pointer
+ // so they are not equal.
+ __ li(v0, Operand(1)); // Non-zero indicates not equal.
+ __ Ret();
+
+ __ bind(&object_test);
+ __ Branch(not_both_strings, lt, a2, Operand(FIRST_JS_OBJECT_TYPE));
+ __ GetObjectType(rhs, a2, a3);
+ __ Branch(not_both_strings, lt, a3, Operand(FIRST_JS_OBJECT_TYPE));
+
+ // If both objects are undetectable, they are equal. Otherwise, they
+ // are not equal, since they are different objects and an object is not
+ // equal to undefined.
+ __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
+ __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
+ __ and_(a0, a2, a3);
+ __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
+ __ Xor(v0, a0, Operand(1 << Map::kIsUndetectable));
+ __ Ret();
}
@@ -235,12 +1443,109 @@
Register scratch3,
bool object_is_smi,
Label* not_found) {
- UNIMPLEMENTED_MIPS();
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch3;
+
+ // Load the number string cache.
+ __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ __ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
+ // Divide length by two (length is a smi).
+ __ sra(mask, mask, kSmiTagSize + 1);
+ __ Addu(mask, mask, -1); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Isolate* isolate = masm->isolate();
+ Label is_smi;
+ Label load_result_from_cache;
+ if (!object_is_smi) {
+ __ JumpIfSmi(object, &is_smi);
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ CheckMap(object,
+ scratch1,
+ Heap::kHeapNumberMapRootIndex,
+ not_found,
+ true);
+
+ STATIC_ASSERT(8 == kDoubleSize);
+ __ Addu(scratch1,
+ object,
+ Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+ __ lw(scratch2, MemOperand(scratch1, kPointerSize));
+ __ lw(scratch1, MemOperand(scratch1, 0));
+ __ Xor(scratch1, scratch1, Operand(scratch2));
+ __ And(scratch1, scratch1, Operand(mask));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer-sized fields.
+ __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
+ __ Addu(scratch1, number_string_cache, scratch1);
+
+ Register probe = mask;
+ __ lw(probe,
+ FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ __ JumpIfSmi(probe, not_found);
+ __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
+ __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
+ __ c(EQ, D, f12, f14);
+ __ bc1t(&load_result_from_cache);
+ __ nop(); // bc1t() requires explicit fill of branch delay slot.
+ __ Branch(not_found);
+ } else {
+ // Note that there is no cache check for the non-FPU case, even though
+ // it seems there could be; adding one might be a tiny optimization for
+ // non-FPU cores.
+ __ Branch(not_found);
+ }
+ }
+
+ __ bind(&is_smi);
+ Register scratch = scratch1;
+ __ sra(scratch, object, 1); // Shift away the tag.
+ __ And(scratch, mask, Operand(scratch));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer-sized fields.
+ __ sll(scratch, scratch, kPointerSizeLog2 + 1);
+ __ Addu(scratch, number_string_cache, scratch);
+
+ // Check if the entry is the smi we are looking for.
+ Register probe = mask;
+ __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ __ Branch(not_found, ne, object, Operand(probe));
+
+ // Get the result from the cache.
+ __ bind(&load_result_from_cache);
+ __ lw(result,
+ FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+
+ __ IncrementCounter(isolate->counters()->number_to_string_native(),
+ 1,
+ scratch1,
+ scratch2);
}
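
The cache indexing is simple enough to restate: the cache is a FixedArray of (number, string) pairs, so the entry count is half its length, and the hash, which is the smi value itself or the xor of the double's two words, is masked by the entry count minus one. A sketch assuming, as the mask construction implies, a power-of-two entry count:

\code
#include <cstddef>
#include <cstdint>
#include <cstring>

// Index of the first slot of the cache entry for a given key.
// Each entry occupies two consecutive slots: the number and its string.
size_t SmiCacheSlot(int32_t smi_value, size_t cache_length) {
  size_t mask = cache_length / 2 - 1;  // Entry count minus one.
  return (static_cast<uint32_t>(smi_value) & mask) * 2;
}

size_t DoubleCacheSlot(double value, size_t cache_length) {
  size_t mask = cache_length / 2 - 1;
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  uint32_t hash =
      static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
  return (hash & mask) * 2;
}
\endcode
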
void NumberToStringStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Label runtime;
+
+ __ lw(a1, MemOperand(sp, 0));
+
+ // Generate code to lookup number in the number string cache.
+ GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
+ __ Addu(sp, sp, Operand(1 * kPointerSize));
+ __ Ret();
+
+ __ bind(&runtime);
+ // Handle number to string in the runtime system if not found in the cache.
+ __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
}
@@ -248,14 +1553,254 @@
// On exit, v0 is 0, positive, or negative (smi) to indicate the result
// of the comparison.
void CompareStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Label slow; // Call builtin.
+ Label not_smis, both_loaded_as_doubles;
+
+
+ if (include_smi_compare_) {
+ Label not_two_smis, smi_done;
+ __ Or(a2, a1, a0);
+ __ JumpIfNotSmi(a2, &not_two_smis);
+ __ sra(a1, a1, 1);
+ __ sra(a0, a0, 1);
+ __ Subu(v0, a1, a0);
+ __ Ret();
+ __ bind(&not_two_smis);
+ } else if (FLAG_debug_code) {
+ __ Or(a2, a1, a0);
+ __ And(a2, a2, kSmiTagMask);
+ __ Assert(ne, "CompareStub: unexpected smi operands.",
+ a2, Operand(zero_reg));
+ }
+
+
+ // NOTICE! This code is only reached after a smi-fast-case check, so
+ // it is certain that at least one operand isn't a smi.
+
+ // Handle the case where the objects are identical. Either returns the answer
+ // or goes to slow. Only falls through if the objects were not identical.
+ EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
+
+ // If either is a Smi (we know that not both are), then they can only
+ // be strictly equal if the other is a HeapNumber.
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT_EQ(0, Smi::FromInt(0));
+ __ And(t2, lhs_, Operand(rhs_));
+ __ JumpIfNotSmi(t2, &not_smis, t0);
+ // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
+ // 1) Return the answer.
+ // 2) Go to slow.
+ // 3) Fall through to both_loaded_as_doubles.
+ // 4) Jump to both_loaded_as_doubles.
+ // In cases 3 and 4 we have found out we were dealing with a number-number
+ // comparison and the numbers have been loaded into f12 and f14 as doubles,
+ // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
+ EmitSmiNonsmiComparison(masm, lhs_, rhs_,
+ &both_loaded_as_doubles, &slow, strict_);
+
+ __ bind(&both_loaded_as_doubles);
+ // f12, f14 are the double representations of the left hand side
+ // and the right hand side if we have FPU. Otherwise a2, a3 represent
+ // left hand side and a0, a1 represent right hand side.
+
+ Isolate* isolate = masm->isolate();
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ Label nan;
+ __ li(t0, Operand(LESS));
+ __ li(t1, Operand(GREATER));
+ __ li(t2, Operand(EQUAL));
+
+ // Check if either rhs or lhs is NaN.
+ __ c(UN, D, f12, f14);
+ __ bc1t(&nan);
+ __ nop();
+
+ // Check if the LESS condition is satisfied. If so, conditionally move
+ // the result to v0.
+ __ c(OLT, D, f12, f14);
+ __ movt(v0, t0);
+ // Use the previous check to conditionally move the opposite result
+ // (GREATER) to v0. If rhs is equal to lhs, this will be corrected by
+ // the next check.
+ __ movf(v0, t1);
+ // Check if the EQUAL condition is satisfied. If so, conditionally move
+ // the result to v0.
+ __ c(EQ, D, f12, f14);
+ __ movt(v0, t2);
+
+ __ Ret();
+
+ __ bind(&nan);
+ // NaN comparisons always fail.
+ // Load whatever we need in v0 to make the comparison fail.
+ if (cc_ == lt || cc_ == le) {
+ __ li(v0, Operand(GREATER));
+ } else {
+ __ li(v0, Operand(LESS));
+ }
+ __ Ret();
+ } else {
+ // Checks for NaN in the doubles we have loaded. Can return the answer or
+ // fall through if neither is a NaN.
+ EmitNanCheck(masm, cc_);
+
+ // Compares two doubles that are not NaNs. Returns the answer.
+ // Never falls through.
+ EmitTwoNonNanDoubleComparison(masm, cc_);
+ }
+
+ __ bind(&not_smis);
+ // At this point we know we are dealing with two different objects,
+ // and neither of them is a Smi. The objects are in lhs_ and rhs_.
+ if (strict_) {
+ // This returns non-equal for some object types, or falls through if it
+ // was not lucky.
+ EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
+ }
+
+ Label check_for_symbols;
+ Label flat_string_check;
+ // Check for heap-number-heap-number comparison. Can jump to slow case,
+ // or load both doubles and jump to the code that handles
+ // that case. If the inputs are not doubles then jumps to check_for_symbols.
+ // In this case a2 will contain the type of lhs_.
+ EmitCheckForTwoHeapNumbers(masm,
+ lhs_,
+ rhs_,
+ &both_loaded_as_doubles,
+ &check_for_symbols,
+ &flat_string_check);
+
+ __ bind(&check_for_symbols);
+ if (cc_ == eq && !strict_) {
+ // Returns an answer for two symbols or two detectable objects.
+ // Otherwise jumps to string case or not both strings case.
+ // Assumes that a2 is the type of lhs_ on entry.
+ EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
+ }
+
+ // Check for both being sequential ASCII strings, and inline if that is the
+ // case.
+ __ bind(&flat_string_check);
+
+ __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow);
+
+ __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
+ if (cc_ == eq) {
+ StringCompareStub::GenerateFlatAsciiStringEquals(masm,
+ lhs_,
+ rhs_,
+ a2,
+ a3,
+ t0);
+ } else {
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ lhs_,
+ rhs_,
+ a2,
+ a3,
+ t0,
+ t1);
+ }
+ // Never falls through to here.
+
+ __ bind(&slow);
+ // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
+ // a1 (rhs) second.
+ __ Push(lhs_, rhs_);
+ // Figure out which native to call and setup the arguments.
+ Builtins::JavaScript native;
+ if (cc_ == eq) {
+ native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ } else {
+ native = Builtins::COMPARE;
+ int ncr; // NaN compare result.
+ if (cc_ == lt || cc_ == le) {
+ ncr = GREATER;
+ } else {
+ ASSERT(cc_ == gt || cc_ == ge); // Remaining cases.
+ ncr = LESS;
+ }
+ __ li(a0, Operand(Smi::FromInt(ncr)));
+ __ push(a0);
+ }
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(native, JUMP_FUNCTION);
}
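
The stub's contract is a three-way result, a negative, zero, or positive smi, with NaN operands forced to whichever value makes the requested condition fail (GREATER for < and <=, LESS for > and >=). A host-side model of that contract, as a sketch of the semantics rather than of the generated code (the -1/0/1 values are assumed; the stub only guarantees the sign, and zero for equality):

\code
#include <cmath>

enum CompareResult { LESS = -1, EQUAL = 0, GREATER = 1 };

// cc_wants_less: true for the lt/le conditions, false for gt/ge.
int ThreeWayCompare(double lhs, double rhs, bool cc_wants_less) {
  if (std::isnan(lhs) || std::isnan(rhs)) {
    // Any ordered comparison involving NaN must come out false.
    return cc_wants_less ? GREATER : LESS;
  }
  if (lhs < rhs) return LESS;
  if (lhs > rhs) return GREATER;
  return EQUAL;  // Note: 0.0 and -0.0 compare EQUAL here, as in the stub.
}
\endcode
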
// This stub does not handle the inlined cases (Smis, Booleans, undefined).
// The stub returns zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // This stub uses FPU instructions.
+ ASSERT(CpuFeatures::IsEnabled(FPU));
+
+ Label false_result;
+ Label not_heap_number;
+ Register scratch0 = t5.is(tos_) ? t3 : t5;
+
+ __ LoadRoot(scratch0, Heap::kNullValueRootIndex);
+ __ Branch(&false_result, eq, tos_, Operand(scratch0));
+
+ // HeapNumber => false if +0, -0, or NaN.
+ __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ __ Branch(&not_heap_number, ne, scratch0, Operand(at));
+
+ __ Subu(at, tos_, Operand(kHeapObjectTag));
+ __ ldc1(f12, MemOperand(at, HeapNumber::kValueOffset));
+ __ fcmp(f12, 0.0, UEQ);
+
+ // "tos_" is a register, and contains a non zero value by default.
+ // Hence we only need to overwrite "tos_" with zero to return false for
+ // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
+ __ movt(tos_, zero_reg);
+ __ Ret();
+
+ __ bind(&not_heap_number);
+
+ // Check if the value is 'null'.
+ // 'null' => false.
+ __ LoadRoot(at, Heap::kNullValueRootIndex);
+ __ Branch(&false_result, eq, tos_, Operand(at));
+
+ // It can be an undetectable object.
+ // Undetectable => false.
+ __ lw(at, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ lbu(scratch0, FieldMemOperand(at, Map::kBitFieldOffset));
+ __ And(scratch0, scratch0, Operand(1 << Map::kIsUndetectable));
+ __ Branch(&false_result, eq, scratch0, Operand(1 << Map::kIsUndetectable));
+
+ // JavaScript object => true.
+ __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
+
+ // "tos_" is a register and contains a non-zero value.
+ // Hence we implicitly return true if the greater than
+ // condition is satisfied.
+ __ Ret(gt, scratch0, Operand(FIRST_JS_OBJECT_TYPE));
+
+ // Check for string.
+ __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
+ // "tos_" is a register and contains a non-zero value.
+ // Hence we implicitly return true if the greater than
+ // condition is satisfied.
+ __ Ret(gt, scratch0, Operand(FIRST_NONSTRING_TYPE));
+
+ // String value => false iff empty, i.e., length is zero.
+ __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
+ // If length is zero, "tos_" contains zero ==> false.
+ // If length is not zero, "tos_" contains a non-zero value ==> true.
+ __ Ret();
+
+ // Return 0 in "tos_" for false.
+ __ bind(&false_result);
+ __ mov(tos_, zero_reg);
+ __ Ret();
}
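
For reference, here is the decision procedure the stub walks through, restated as a host-side model. The Kind enum is hypothetical; in the stub these cases are distinguished by root comparisons, map checks, and instance-type checks:

\code
#include <cstddef>

enum Kind { kNull, kHeapNumber, kUndetectable, kJSObject, kString, kOther };

// Sketch of the stub's ToBoolean rules for the non-inlined cases.
bool ToBoolean(Kind kind, double number = 0.0, size_t string_length = 0) {
  switch (kind) {
    case kNull:
      return false;
    case kHeapNumber:
      // False for NaN (n != n) and for +0/-0.
      return number == number && number != 0.0;
    case kUndetectable:
      return false;  // e.g. document.all-style objects.
    case kJSObject:
      return true;
    case kString:
      return string_length != 0;
    case kOther:
      return true;  // The non-zero input register falls through as true.
  }
  return true;
}
\endcode
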
@@ -267,99 +1812,288 @@
const char* TypeRecordingUnaryOpStub::GetName() {
- UNIMPLEMENTED_MIPS();
- return NULL;
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name = NULL; // Make g++ happy.
+ switch (mode_) {
+ case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "TypeRecordingUnaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ TRUnaryOpIC::GetName(operand_type_));
+ return name_;
}
// TODO(svenpanne): Use virtual functions instead of switch.
void TypeRecordingUnaryOpStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ switch (operand_type_) {
+ case TRUnaryOpIC::UNINITIALIZED:
+ GenerateTypeTransition(masm);
+ break;
+ case TRUnaryOpIC::SMI:
+ GenerateSmiStub(masm);
+ break;
+ case TRUnaryOpIC::HEAP_NUMBER:
+ GenerateHeapNumberStub(masm);
+ break;
+ case TRUnaryOpIC::GENERIC:
+ GenerateGenericStub(masm);
+ break;
+ }
}
void TypeRecordingUnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // Argument is in a0 and v0 at this point, so we can overwrite a0.
+ // Push this stub's key. Although the operation and the type info are
+ // encoded into the key, the encoding is opaque, so push them too.
+ __ li(a2, Operand(Smi::FromInt(MinorKey())));
+ __ li(a1, Operand(Smi::FromInt(op_)));
+ __ li(a0, Operand(Smi::FromInt(operand_type_)));
+
+ __ Push(v0, a2, a1, a0);
+
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kTypeRecordingUnaryOp_Patch),
+ masm->isolate()),
+ 4,
+ 1);
}
// TODO(svenpanne): Use virtual functions instead of switch.
void TypeRecordingUnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ switch (op_) {
+ case Token::SUB:
+ GenerateSmiStubSub(masm);
+ break;
+ case Token::BIT_NOT:
+ GenerateSmiStubBitNot(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void TypeRecordingUnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Label non_smi, slow;
+ GenerateSmiCodeSub(masm, &non_smi, &slow);
+ __ bind(&non_smi);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
}
void TypeRecordingUnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Label non_smi;
+ GenerateSmiCodeBitNot(masm, &non_smi);
+ __ bind(&non_smi);
+ GenerateTypeTransition(masm);
}
void TypeRecordingUnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
Label* non_smi,
Label* slow) {
- UNIMPLEMENTED_MIPS();
+ __ JumpIfNotSmi(a0, non_smi);
+
+ // The result of negating zero or the smallest negative smi is not a smi.
+ __ And(t0, a0, ~0x80000000);
+ __ Branch(slow, eq, t0, Operand(zero_reg));
+
+ // Return '0 - value'.
+ __ Subu(v0, zero_reg, a0);
+ __ Ret();
}
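
The single mask test above catches both bail-out operands at once: with a one-bit smi tag of zero, the tagged smi 0 is the word 0x00000000 and the tagged minimum smi (-2^30) is 0x80000000, and those are exactly the smi words with no bits set outside bit 31. A quick host-side restatement, assuming the 32-bit, one-bit-tag smi encoding used here:

\code
#include <cassert>
#include <cstdint>

// True iff negating this tagged smi cannot produce a smi: smi 0 (the
// result -0 needs a heap number) or the minimum smi (negation overflows).
bool NegationNeedsSlowPath(uint32_t tagged_smi) {
  return (tagged_smi & ~0x80000000u) == 0;
}

int main() {
  assert(NegationNeedsSlowPath(0u));                               // Smi 0.
  assert(NegationNeedsSlowPath(0x80000000u));                      // Smi -2^30.
  assert(!NegationNeedsSlowPath(3u << 1));                         // Smi 3.
  assert(!NegationNeedsSlowPath(static_cast<uint32_t>(-3) << 1));  // Smi -3.
  return 0;
}
\endcode
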
void TypeRecordingUnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
Label* non_smi) {
- UNIMPLEMENTED_MIPS();
+ __ JumpIfNotSmi(a0, non_smi);
+
+ // Flip all bits, then clear the (now inverted) smi tag bit.
+ __ Neg(v0, a0);
+ __ And(v0, v0, ~kSmiTagMask);
+ __ Ret();
}
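
The two instructions above rely on a small identity, taking the comment's "flip bits" reading of the Neg macro: for a tagged smi t = v << 1, flipping every bit and then clearing the tag bit yields the tagged complement, since ~t = ~(v << 1) = ((~v) << 1) | 1. A host-side check of the identity:

\code
#include <cassert>
#include <cstdint>

int main() {
  // For every smi v: (~(v << 1)) & ~1 == (~v) << 1.
  for (int32_t v = -1000; v <= 1000; ++v) {
    uint32_t tagged = static_cast<uint32_t>(v) << 1;
    uint32_t flipped = ~tagged & ~1u;  // Flip bits, clear the tag bit.
    assert(flipped == static_cast<uint32_t>(~v) << 1);
  }
  return 0;
}
\endcode
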
// TODO(svenpanne): Use virtual functions instead of switch.
void TypeRecordingUnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ switch (op_) {
+ case Token::SUB:
+ GenerateHeapNumberStubSub(masm);
+ break;
+ case Token::BIT_NOT:
+ GenerateHeapNumberStubBitNot(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void TypeRecordingUnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Label non_smi, slow;
+ GenerateSmiCodeSub(masm, &non_smi, &slow);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeSub(masm, &slow);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
}
void TypeRecordingUnaryOpStub::GenerateHeapNumberStubBitNot(
MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Label non_smi, slow;
+ GenerateSmiCodeBitNot(masm, &non_smi);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeBitNot(masm, &slow);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
}
-
void TypeRecordingUnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
Label* slow) {
- UNIMPLEMENTED_MIPS();
+ EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
+ // a0 is a heap number. Get a new heap number in a1.
+ if (mode_ == UNARY_OVERWRITE) {
+ __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+ __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
+ __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+ } else {
+ Label slow_allocate_heapnumber, heapnumber_allocated;
+ __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
+ __ jmp(&heapnumber_allocated);
+
+ __ bind(&slow_allocate_heapnumber);
+ __ EnterInternalFrame();
+ __ push(a0);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(a1, v0);
+ __ pop(a0);
+ __ LeaveInternalFrame();
+
+ __ bind(&heapnumber_allocated);
+ __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
+ __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+ __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
+ __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
+ __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
+ __ mov(v0, a1);
+ }
+ __ Ret();
}
void TypeRecordingUnaryOpStub::GenerateHeapNumberCodeBitNot(
MacroAssembler* masm, Label* slow) {
- UNIMPLEMENTED_MIPS();
+ EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
+ // Convert the heap number in a0 to an untagged integer in a1.
+ __ ConvertToInt32(a0, a1, a2, a3, f0, slow);
+
+ // Do the bitwise operation and check if the result fits in a smi.
+ Label try_float;
+ __ Neg(a1, a1);
+ __ Addu(a2, a1, Operand(0x40000000));
+ __ Branch(&try_float, lt, a2, Operand(zero_reg));
+
+ // Tag the result as a smi and we're done.
+ __ SmiTag(v0, a1);
+ __ Ret();
+
+ // Try to store the result in a heap number.
+ __ bind(&try_float);
+ if (mode_ == UNARY_NO_OVERWRITE) {
+ Label slow_allocate_heapnumber, heapnumber_allocated;
+ __ AllocateHeapNumber(v0, a2, a3, t2, &slow_allocate_heapnumber);
+ __ jmp(&heapnumber_allocated);
+
+ __ bind(&slow_allocate_heapnumber);
+ __ EnterInternalFrame();
+ __ push(a1);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ pop(a1);
+ __ LeaveInternalFrame();
+
+ __ bind(&heapnumber_allocated);
+ }
+
+ if (CpuFeatures::IsSupported(FPU)) {
+ // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
+ CpuFeatures::Scope scope(FPU);
+ __ mtc1(a1, f0);
+ __ cvt_d_w(f0, f0);
+ __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+ __ Ret();
+ } else {
+ // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
+ // have to set up a frame.
+ WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ }
}
// TODO(svenpanne): Use virtual functions instead of switch.
void TypeRecordingUnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ switch (op_) {
+ case Token::SUB:
+ GenerateGenericStubSub(masm);
+ break;
+ case Token::BIT_NOT:
+ GenerateGenericStubBitNot(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void TypeRecordingUnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Label non_smi, slow;
+ GenerateSmiCodeSub(masm, &non_smi, &slow);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeSub(masm, &slow);
+ __ bind(&slow);
+ GenerateGenericCodeFallback(masm);
}
void TypeRecordingUnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Label non_smi, slow;
+ GenerateSmiCodeBitNot(masm, &non_smi);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeBitNot(masm, &slow);
+ __ bind(&slow);
+ GenerateGenericCodeFallback(masm);
}
void TypeRecordingUnaryOpStub::GenerateGenericCodeFallback(
MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // Handle the slow case by jumping to the JavaScript builtin.
+ __ push(a0);
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
@@ -372,7 +2106,20 @@
void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Label get_result;
+
+ __ Push(a1, a0);
+
+ __ li(a2, Operand(Smi::FromInt(MinorKey())));
+ __ li(a1, Operand(Smi::FromInt(op_)));
+ __ li(a0, Operand(Smi::FromInt(operands_type_)));
+ __ Push(a2, a1, a0);
+
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch),
+ masm->isolate()),
+ 5,
+ 1);
}
@@ -383,12 +2130,57 @@
void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ switch (operands_type_) {
+ case TRBinaryOpIC::UNINITIALIZED:
+ GenerateTypeTransition(masm);
+ break;
+ case TRBinaryOpIC::SMI:
+ GenerateSmiStub(masm);
+ break;
+ case TRBinaryOpIC::INT32:
+ GenerateInt32Stub(masm);
+ break;
+ case TRBinaryOpIC::HEAP_NUMBER:
+ GenerateHeapNumberStub(masm);
+ break;
+ case TRBinaryOpIC::ODDBALL:
+ GenerateOddballStub(masm);
+ break;
+ case TRBinaryOpIC::BOTH_STRING:
+ GenerateBothStringStub(masm);
+ break;
+ case TRBinaryOpIC::STRING:
+ GenerateStringStub(masm);
+ break;
+ case TRBinaryOpIC::GENERIC:
+ GenerateGeneric(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
const char* TypeRecordingBinaryOpStub::GetName() {
- UNIMPLEMENTED_MIPS();
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+ case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+ default: overwrite_name = "UnknownOverwrite"; break;
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "TypeRecordingBinaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ TRBinaryOpIC::GetName(operands_type_));
return name_;
}
@@ -396,7 +2188,156 @@
void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation(
MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Register left = a1;
+ Register right = a0;
+
+ Register scratch1 = t0;
+ Register scratch2 = t1;
+
+ ASSERT(right.is(a0));
+ STATIC_ASSERT(kSmiTag == 0);
+
+ Label not_smi_result;
+ switch (op_) {
+ case Token::ADD:
+ __ AdduAndCheckForOverflow(v0, left, right, scratch1);
+ __ RetOnNoOverflow(scratch1);
+ // No need to revert anything - right and left are intact.
+ break;
+ case Token::SUB:
+ __ SubuAndCheckForOverflow(v0, left, right, scratch1);
+ __ RetOnNoOverflow(scratch1);
+ // No need to revert anything - right and left are intact.
+ break;
+ case Token::MUL: {
+ // Remove tag from one of the operands. This way the multiplication result
+ // will be a smi if it fits the smi range.
+ __ SmiUntag(scratch1, right);
+ // Do multiplication.
+ // lo = lower 32 bits of scratch1 * left.
+ // hi = higher 32 bits of scratch1 * left.
+ __ Mult(left, scratch1);
+ // Check for overflowing the smi range - no overflow if higher 33 bits of
+ // the result are identical.
+ __ mflo(scratch1);
+ __ mfhi(scratch2);
+ __ sra(scratch1, scratch1, 31);
+ __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2));
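+ // (Illustration: a 64-bit product hi:lo fits in 32 bits exactly when hi
+ // equals the sign extension of lo, i.e. hi == (int32_t)lo >> 31; that is
+ // the "higher 33 bits identical" test performed above.)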
+ // Go slow on zero result to handle -0.
+ __ mflo(v0);
+ __ Ret(ne, v0, Operand(zero_reg));
+ // A zero product must be returned as -0 if the non-zero operand was
+ // negative. We know one of the operands was zero.
+ __ Addu(scratch2, right, left);
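+ // (Since one operand is zero, scratch2 now holds the other operand; its
+ // sign picks between returning smi 0 and the -0 slow case below.)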
+ Label skip;
+ // ARM uses the 'pl' condition, which is 'ge'.
+ // Negating it results in 'lt'.
+ __ Branch(&skip, lt, scratch2, Operand(zero_reg));
+ ASSERT(Smi::FromInt(0) == 0);
+ __ mov(v0, zero_reg);
+ __ Ret(); // Return smi 0 if the non-zero one was positive.
+ __ bind(&skip);
+ // We fall through here if we multiplied a negative number with 0, because
+ // that would mean we should produce -0.
+ }
+ break;
+ case Token::DIV: {
+ Label done;
+ __ SmiUntag(scratch2, right);
+ __ SmiUntag(scratch1, left);
+ __ Div(scratch1, scratch2);
+ // A minor optimization: div may be calculated asynchronously, so we check
+ // for division by zero before getting the result.
+ __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
+ // If the result is 0, we need to make sure the divisor (right) is
+ // positive, otherwise it is a -0 case.
+ // Quotient is in 'lo', remainder is in 'hi'.
+ // Check for no remainder first.
+ __ mfhi(scratch1);
+ __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
+ __ mflo(scratch1);
+ __ Branch(&done, ne, scratch1, Operand(zero_reg));
+ __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
+ __ bind(&done);
+ // Check that the signed result fits in a Smi.
+ __ Addu(scratch2, scratch1, Operand(0x40000000));
+ __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
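+ // (Illustration, not generated code; the same check in C:
+ //   bool fits_smi = (int32_t)(x + 0x40000000) >= 0;
+ // true exactly for -2^30 <= x <= 2^30 - 1, the values that survive the
+ // one-bit smi tag shift.)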
+ __ SmiTag(v0, scratch1);
+ __ Ret();
+ }
+ break;
+ case Token::MOD: {
+ Label done;
+ __ SmiUntag(scratch2, right);
+ __ SmiUntag(scratch1, left);
+ __ Div(scratch1, scratch2);
+ // A minor optimization: div may be calculated asynchronously, so we check
+ // for division by 0 before calling mfhi.
+ // Check for zero on the right hand side.
+ __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
+ // If the result is 0, we need to make sure the dividend (left) is
+ // positive (or 0), otherwise it is a -0 case.
+ // Remainder is in 'hi'.
+ __ mfhi(scratch2);
+ __ Branch(&done, ne, scratch2, Operand(zero_reg));
+ __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
+ __ bind(&done);
+ // Check that the signed result fits in a Smi.
+ __ Addu(scratch1, scratch2, Operand(0x40000000));
+ __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
+ __ SmiTag(v0, scratch2);
+ __ Ret();
+ }
+ break;
+ case Token::BIT_OR:
+ __ Or(v0, left, Operand(right));
+ __ Ret();
+ break;
+ case Token::BIT_AND:
+ __ And(v0, left, Operand(right));
+ __ Ret();
+ break;
+ case Token::BIT_XOR:
+ __ Xor(v0, left, Operand(right));
+ __ Ret();
+ break;
+ case Token::SAR:
+ // Remove tags from right operand.
+ __ GetLeastBitsFromSmi(scratch1, right, 5);
+ __ srav(scratch1, left, scratch1);
+ // Smi tag result.
+ __ And(v0, scratch1, Operand(~kSmiTagMask));
+ __ Ret();
+ break;
+ case Token::SHR:
+ // Remove tags from operands. We can't do this on a 31 bit number
+ // because then the 0s get shifted into bit 30 instead of bit 31.
+ __ SmiUntag(scratch1, left);
+ __ GetLeastBitsFromSmi(scratch2, right, 5);
+ __ srlv(v0, scratch1, scratch2);
+ // Unsigned shift is not allowed to produce a negative number, so
+ // check the sign bit and the sign bit after Smi tagging.
+ __ And(scratch1, v0, Operand(0xc0000000));
+ __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
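+ // (Illustration: the 0xc0000000 mask tests bits 31 and 30; bit 31 set
+ // means the unsigned result reads as a negative int32, and bit 30 set
+ // means it would overflow when tagged, so neither can yield a smi.)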
+ // Smi tag result.
+ __ SmiTag(v0);
+ __ Ret();
+ break;
+ case Token::SHL:
+ // Remove tags from operands.
+ __ SmiUntag(scratch1, left);
+ __ GetLeastBitsFromSmi(scratch2, right, 5);
+ __ sllv(scratch1, scratch1, scratch2);
+ // Check that the signed result fits in a Smi.
+ __ Addu(scratch2, scratch1, Operand(0x40000000));
+ __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
+ __ SmiTag(v0, scratch1);
+ __ Ret();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ __ bind(&not_smi_result);
}
@@ -404,7 +2345,211 @@
bool smi_operands,
Label* not_numbers,
Label* gc_required) {
- UNIMPLEMENTED_MIPS();
+ Register left = a1;
+ Register right = a0;
+ Register scratch1 = t3;
+ Register scratch2 = t5;
+ Register scratch3 = t0;
+
+ ASSERT(smi_operands || (not_numbers != NULL));
+ if (smi_operands && FLAG_debug_code) {
+ __ AbortIfNotSmi(left);
+ __ AbortIfNotSmi(right);
+ }
+
+ Register heap_number_map = t2;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD: {
+ // Load left and right operands into f12 and f14 or a0/a1 and a2/a3
+ // depending on whether FPU is available or not.
+ FloatingPointHelper::Destination destination =
+ CpuFeatures::IsSupported(FPU) &&
+ op_ != Token::MOD ?
+ FloatingPointHelper::kFPURegisters :
+ FloatingPointHelper::kCoreRegisters;
+
+ // Allocate new heap number for result.
+ Register result = s0;
+ GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required);
+
+ // Load the operands.
+ if (smi_operands) {
+ FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
+ } else {
+ FloatingPointHelper::LoadOperands(masm,
+ destination,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ not_numbers);
+ }
+
+ // Calculate the result.
+ if (destination == FloatingPointHelper::kFPURegisters) {
+ // Using FPU registers:
+ // f12: Left value.
+ // f14: Right value.
+ CpuFeatures::Scope scope(FPU);
+ switch (op_) {
+ case Token::ADD:
+ __ add_d(f10, f12, f14);
+ break;
+ case Token::SUB:
+ __ sub_d(f10, f12, f14);
+ break;
+ case Token::MUL:
+ __ mul_d(f10, f12, f14);
+ break;
+ case Token::DIV:
+ __ div_d(f10, f12, f14);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ // ARM uses a workaround here because of the unaligned HeapNumber
+ // kValueOffset. On MIPS this workaround is built into sdc1 so
+ // there's no point in generating even more instructions.
+ __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
+ __ mov(v0, result);
+ __ Ret();
+ } else {
+ // Call the C function to handle the double operation.
+ FloatingPointHelper::CallCCodeForDoubleOperation(masm,
+ op_,
+ result,
+ scratch1);
+ if (FLAG_debug_code) {
+ __ stop("Unreachable code.");
+ }
+ }
+ break;
+ }
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::SAR:
+ case Token::SHR:
+ case Token::SHL: {
+ if (smi_operands) {
+ __ SmiUntag(a3, left);
+ __ SmiUntag(a2, right);
+ } else {
+ // Convert operands to 32-bit integers. Right in a2 and left in a3.
+ FloatingPointHelper::ConvertNumberToInt32(masm,
+ left,
+ a3,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ scratch3,
+ f0,
+ not_numbers);
+ FloatingPointHelper::ConvertNumberToInt32(masm,
+ right,
+ a2,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ scratch3,
+ f0,
+ not_numbers);
+ }
+ Label result_not_a_smi;
+ switch (op_) {
+ case Token::BIT_OR:
+ __ Or(a2, a3, Operand(a2));
+ break;
+ case Token::BIT_XOR:
+ __ Xor(a2, a3, Operand(a2));
+ break;
+ case Token::BIT_AND:
+ __ And(a2, a3, Operand(a2));
+ break;
+ case Token::SAR:
+ // Use only the 5 least significant bits of the shift count.
+ __ GetLeastBitsFromInt32(a2, a2, 5);
+ __ srav(a2, a3, a2);
+ break;
+ case Token::SHR:
+ // Use only the 5 least significant bits of the shift count.
+ __ GetLeastBitsFromInt32(a2, a2, 5);
+ __ srlv(a2, a3, a2);
+ // SHR is special because it is required to produce a positive answer.
+ // The code below for writing into heap numbers isn't capable of
+ // writing the register as an unsigned int so we go to slow case if we
+ // hit this case.
+ if (CpuFeatures::IsSupported(FPU)) {
+ __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
+ } else {
+ __ Branch(not_numbers, lt, a2, Operand(zero_reg));
+ }
+ break;
+ case Token::SHL:
+ // Use only the 5 least significant bits of the shift count.
+ __ GetLeastBitsFromInt32(a2, a2, 5);
+ __ sllv(a2, a3, a2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ // Check that the *signed* result fits in a smi.
+ __ Addu(a3, a2, Operand(0x40000000));
+ __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
+ __ SmiTag(v0, a2);
+ __ Ret();
+
+ // Allocate new heap number for result.
+ __ bind(&result_not_a_smi);
+ Register result = t1;
+ if (smi_operands) {
+ __ AllocateHeapNumber(
+ result, scratch1, scratch2, heap_number_map, gc_required);
+ } else {
+ GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required);
+ }
+
+ // a2: Answer as signed int32.
+ // t1: Heap number to write answer into.
+
+ // Nothing can go wrong now, so move the heap number to v0, which is the
+ // result.
+ __ mov(v0, t1);
+
+ if (CpuFeatures::IsSupported(FPU)) {
+ // Convert the int32 in a2 to the heap number in v0. As mentioned
+ // above, SHR needs to always produce a positive result.
+ CpuFeatures::Scope scope(FPU);
+ __ mtc1(a2, f0);
+ if (op_ == Token::SHR) {
+ __ Cvt_d_uw(f0, f0);
+ } else {
+ __ cvt_d_w(f0, f0);
+ }
+ // ARM uses a workaround here because of the unaligned HeapNumber
+ // kValueOffset. On MIPS this workaround is built into sdc1 so
+ // there's no point in generating even more instructions.
+ __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+ __ Ret();
+ } else {
+ // Tail call that writes the int32 in a2 to the heap number in v0, using
+ // a3 and a0 as scratch. v0 is preserved and returned.
+ WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
+ __ TailCallStub(&stub);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
@@ -416,42 +2561,549 @@
Label* use_runtime,
Label* gc_required,
SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
- UNIMPLEMENTED_MIPS();
+ Label not_smis;
+
+ Register left = a1;
+ Register right = a0;
+ Register scratch1 = t3;
+ Register scratch2 = t5;
+
+ // Perform combined smi check on both operands.
+ __ Or(scratch1, left, Operand(right));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfNotSmi(scratch1, &not_smis);
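+ // (Illustration: the smi tag is a zero low bit, so
+ //   ((left | right) & kSmiTagMask) == 0
+ // holds exactly when both operands are smis; a single test covers both.)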
+
+ // If the smi-smi operation results in a smi, generated code returns it.
+ GenerateSmiSmiOperation(masm);
+
+ // If heap number results are possible, generate the result in an
+ // allocated heap number.
+ if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
+ GenerateFPOperation(masm, true, use_runtime, gc_required);
+ }
+ __ bind(&not_smis);
}
void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Label not_smis, call_runtime;
+
+ if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
+ result_type_ == TRBinaryOpIC::SMI) {
+ // Only allow smi results.
+ GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
+ } else {
+ // Allow heap number result and don't make a transition if a heap number
+ // cannot be allocated.
+ GenerateSmiCode(masm,
+ &call_runtime,
+ &call_runtime,
+ ALLOW_HEAPNUMBER_RESULTS);
+ }
+
+ // Code falls through if the result is not returned as either a smi or heap
+ // number.
+ GenerateTypeTransition(masm);
+
+ __ bind(&call_runtime);
+ GenerateCallRuntime(masm);
}
void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(operands_type_ == TRBinaryOpIC::STRING);
+ // Try to add arguments as strings, otherwise, transition to the generic
+ // TRBinaryOpIC type.
+ GenerateAddStrings(masm);
+ GenerateTypeTransition(masm);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
+ Label call_runtime;
+ ASSERT(operands_type_ == TRBinaryOpIC::BOTH_STRING);
+ ASSERT(op_ == Token::ADD);
+ // If both arguments are strings, call the string add stub.
+ // Otherwise, do a transition.
+
+ // Registers containing left and right operands respectively.
+ Register left = a1;
+ Register right = a0;
+
+ // Test if left operand is a string.
+ __ JumpIfSmi(left, &call_runtime);
+ __ GetObjectType(left, a2, a2);
+ __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
+
+ // Test if right operand is a string.
+ __ JumpIfSmi(right, &call_runtime);
+ __ GetObjectType(right, a2, a2);
+ __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
+
+ StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_stub);
+
+ __ bind(&call_runtime);
+ GenerateTypeTransition(masm);
}
void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(operands_type_ == TRBinaryOpIC::INT32);
+
+ Register left = a1;
+ Register right = a0;
+ Register scratch1 = t3;
+ Register scratch2 = t5;
+ FPURegister double_scratch = f0;
+ FPURegister single_scratch = f6;
+
+ Register heap_number_result = no_reg;
+ Register heap_number_map = t2;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ Label call_runtime;
+ // Label for type transition, used for wrong input or output types.
+ // A single label currently serves both causes; it is bound just before
+ // the type transition code is generated.
+ Label transition;
+
+ // Smi-smi fast case.
+ Label skip;
+ __ Or(scratch1, left, right);
+ __ JumpIfNotSmi(scratch1, &skip);
+ GenerateSmiSmiOperation(masm);
+ // Fall through if the result is not a smi.
+ __ bind(&skip);
+
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD: {
+ // Load both operands and check that they are 32-bit integers.
+ // Jump to type transition if they are not. The registers a0 and a1 (right
+ // and left) are preserved for the runtime call.
+ FloatingPointHelper::Destination destination =
+ CpuFeatures::IsSupported(FPU) &&
+ op_ != Token::MOD ?
+ FloatingPointHelper::kFPURegisters :
+ FloatingPointHelper::kCoreRegisters;
+
+ FloatingPointHelper::LoadNumberAsInt32Double(masm,
+ right,
+ destination,
+ f14,
+ a2,
+ a3,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ f2,
+ &transition);
+ FloatingPointHelper::LoadNumberAsInt32Double(masm,
+ left,
+ destination,
+ f12,
+ t0,
+ t1,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ f2,
+ &transition);
+
+ if (destination == FloatingPointHelper::kFPURegisters) {
+ CpuFeatures::Scope scope(FPU);
+ Label return_heap_number;
+ switch (op_) {
+ case Token::ADD:
+ __ add_d(f10, f12, f14);
+ break;
+ case Token::SUB:
+ __ sub_d(f10, f12, f14);
+ break;
+ case Token::MUL:
+ __ mul_d(f10, f12, f14);
+ break;
+ case Token::DIV:
+ __ div_d(f10, f12, f14);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (op_ != Token::DIV) {
+ // These operations produce an integer result.
+ // Try to return a smi if we can.
+ // Otherwise return a heap number if allowed, or jump to type
+ // transition.
+
+ // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
+ // On MIPS a lot of things cannot be implemented the same way so right
+ // now it makes a lot more sense to just do things manually.
+
+ // Save FCSR.
+ __ cfc1(scratch1, FCSR);
+ // Disable FPU exceptions.
+ __ ctc1(zero_reg, FCSR);
+ __ trunc_w_d(single_scratch, f10);
+ // Retrieve FCSR.
+ __ cfc1(scratch2, FCSR);
+ // Restore FCSR.
+ __ ctc1(scratch1, FCSR);
+
+ // Check for inexact conversion.
+ __ srl(scratch2, scratch2, kFCSRFlagShift);
+ __ And(scratch2, scratch2, kFCSRFlagMask);
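+ // (Illustration: trunc_w_d records invalid/inexact conditions as flag
+ // bits in the FCSR; starting from a cleared FCSR, a non-zero scratch2
+ // here means f10 was not exactly representable as a 32-bit integer.)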
+
+ if (result_type_ <= TRBinaryOpIC::INT32) {
+ // If scratch2 != 0, result does not fit in a 32-bit integer.
+ __ Branch(&transition, ne, scratch2, Operand(zero_reg));
+ }
+
+ // Check if the result fits in a smi.
+ __ mfc1(scratch1, single_scratch);
+ __ Addu(scratch2, scratch1, Operand(0x40000000));
+ // If not, try to return a heap number.
+ __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
+ // Check for minus zero. Return heap number for minus zero.
+ Label not_zero;
+ __ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
+ __ mfc1(scratch2, f11);
+ __ And(scratch2, scratch2, HeapNumber::kSignMask);
+ __ Branch(&return_heap_number, ne, scratch2, Operand(zero_reg));
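+ // (Illustration: f11 is the high half of the f10/f11 register pair, so a
+ // zero integer result whose high word has the IEEE sign bit set is -0.0,
+ // which has no smi representation.)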
+ __ bind(&not_zero);
+
+ // Tag the result and return.
+ __ SmiTag(v0, scratch1);
+ __ Ret();
+ } else {
+ // DIV just falls through to allocating a heap number.
+ }
+
+ if (result_type_ >= ((op_ == Token::DIV) ? TRBinaryOpIC::HEAP_NUMBER
+ : TRBinaryOpIC::INT32)) {
+ __ bind(&return_heap_number);
+ // We are using FPU registers so s0 is available.
+ heap_number_result = s0;
+ GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime);
+ __ mov(v0, heap_number_result);
+ __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset));
+ __ Ret();
+ }
+
+ // A DIV operation expecting an integer result falls through
+ // to type transition.
+
+ } else {
+ // We preserved a0 and a1 to be able to call runtime.
+ // Save the left value on the stack.
+ __ Push(t1, t0);
+
+ Label pop_and_call_runtime;
+
+ // Allocate a heap number to store the result.
+ heap_number_result = s0;
+ GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &pop_and_call_runtime);
+
+ // Load the left value from the value saved on the stack.
+ __ Pop(a1, a0);
+
+ // Call the C function to handle the double operation.
+ FloatingPointHelper::CallCCodeForDoubleOperation(
+ masm, op_, heap_number_result, scratch1);
+ if (FLAG_debug_code) {
+ __ stop("Unreachable code.");
+ }
+
+ __ bind(&pop_and_call_runtime);
+ __ Drop(2);
+ __ Branch(&call_runtime);
+ }
+
+ break;
+ }
+
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::SAR:
+ case Token::SHR:
+ case Token::SHL: {
+ Label return_heap_number;
+ Register scratch3 = t1;
+ // Convert operands to 32-bit integers. Right in a2 and left in a3. The
+ // registers a0 and a1 (right and left) are preserved for the runtime
+ // call.
+ FloatingPointHelper::LoadNumberAsInt32(masm,
+ left,
+ a3,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ scratch3,
+ f0,
+ &transition);
+ FloatingPointHelper::LoadNumberAsInt32(masm,
+ right,
+ a2,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ scratch3,
+ f0,
+ &transition);
+
+ // The ECMA-262 standard specifies that, for shift operations, only the
+ // 5 least significant bits of the shift value should be used.
+ switch (op_) {
+ case Token::BIT_OR:
+ __ Or(a2, a3, Operand(a2));
+ break;
+ case Token::BIT_XOR:
+ __ Xor(a2, a3, Operand(a2));
+ break;
+ case Token::BIT_AND:
+ __ And(a2, a3, Operand(a2));
+ break;
+ case Token::SAR:
+ __ And(a2, a2, Operand(0x1f));
+ __ srav(a2, a3, a2);
+ break;
+ case Token::SHR:
+ __ And(a2, a2, Operand(0x1f));
+ __ srlv(a2, a3, a2);
+ // SHR is special because it is required to produce a positive answer.
+ // We only get a negative result if the shift value (a2) is 0.
+ // This result cannot be represented as a signed 32-bit integer, so
+ // try to return a heap number if we can.
+ // The non-FPU code does not support this special case, so jump to
+ // the runtime if we don't support it.
+ if (CpuFeatures::IsSupported(FPU)) {
+ __ Branch((result_type_ <= TRBinaryOpIC::INT32)
+ ? &transition
+ : &return_heap_number,
+ lt,
+ a2,
+ Operand(zero_reg));
+ } else {
+ __ Branch((result_type_ <= TRBinaryOpIC::INT32)
+ ? &transition
+ : &call_runtime,
+ lt,
+ a2,
+ Operand(zero_reg));
+ }
+ break;
+ case Token::SHL:
+ __ And(a2, a2, Operand(0x1f));
+ __ sllv(a2, a3, a2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ // Check if the result fits in a smi.
+ __ Addu(scratch1, a2, Operand(0x40000000));
+ // If not, try to return a heap number. (We know the result is an int32.)
+ __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
+ // Tag the result and return.
+ __ SmiTag(v0, a2);
+ __ Ret();
+
+ __ bind(&return_heap_number);
+ heap_number_result = t1;
+ GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime);
+
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+
+ if (op_ != Token::SHR) {
+ // Convert the result to a floating point value.
+ __ mtc1(a2, double_scratch);
+ __ cvt_d_w(double_scratch, double_scratch);
+ } else {
+ // The result must be interpreted as an unsigned 32-bit integer.
+ __ mtc1(a2, double_scratch);
+ __ Cvt_d_uw(double_scratch, double_scratch);
+ }
+
+ // Store the result.
+ __ mov(v0, heap_number_result);
+ __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset));
+ __ Ret();
+ } else {
+ // Tail call that writes the int32 in a2 to the heap number in v0, using
+ // a3 and a1 as scratch. v0 is preserved and returned.
+ __ mov(a0, t1);
+ WriteInt32ToHeapNumberStub stub(a2, v0, a3, a1);
+ __ TailCallStub(&stub);
+ }
+
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+
+ if (transition.is_linked()) {
+ __ bind(&transition);
+ GenerateTypeTransition(masm);
+ }
+
+ __ bind(&call_runtime);
+ GenerateCallRuntime(masm);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
+ Label call_runtime;
+
+ if (op_ == Token::ADD) {
+ // Handle string addition here, because it is the only operation
+ // that does not do a ToNumber conversion on the operands.
+ GenerateAddStrings(masm);
+ }
+
+ // Convert oddball arguments to numbers.
+ Label check, done;
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ Branch(&check, ne, a1, Operand(t0));
+ if (Token::IsBitOp(op_)) {
+ __ li(a1, Operand(Smi::FromInt(0)));
+ } else {
+ __ LoadRoot(a1, Heap::kNanValueRootIndex);
+ }
+ __ jmp(&done);
+ __ bind(&check);
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ Branch(&done, ne, a0, Operand(t0));
+ if (Token::IsBitOp(op_)) {
+ __ li(a0, Operand(Smi::FromInt(0)));
+ } else {
+ __ LoadRoot(a0, Heap::kNanValueRootIndex);
+ }
+ __ bind(&done);
+
+ GenerateHeapNumberStub(masm);
}
void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Label call_runtime;
+ GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
+
+ __ bind(&call_runtime);
+ GenerateCallRuntime(masm);
}
void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Label call_runtime, call_string_add_or_runtime;
+
+ GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+
+ GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
+
+ __ bind(&call_string_add_or_runtime);
+ if (op_ == Token::ADD) {
+ GenerateAddStrings(masm);
+ }
+
+ __ bind(&call_runtime);
+ GenerateCallRuntime(masm);
}
void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(op_ == Token::ADD);
+ Label left_not_string, call_runtime;
+
+ Register left = a1;
+ Register right = a0;
+
+ // Check if left argument is a string.
+ __ JumpIfSmi(left, &left_not_string);
+ __ GetObjectType(left, a2, a2);
+ __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
+
+ StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_left_stub);
+
+ // Left operand is not a string, test right.
+ __ bind(&left_not_string);
+ __ JumpIfSmi(right, &call_runtime);
+ __ GetObjectType(right, a2, a2);
+ __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
+
+ StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_right_stub);
+
+ // At least one argument is not a string.
+ __ bind(&call_runtime);
}
void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ GenerateRegisterArgsPush(masm);
+ switch (op_) {
+ case Token::ADD:
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ break;
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ break;
+ case Token::MUL:
+ __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ break;
+ case Token::DIV:
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+ break;
+ case Token::MOD:
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ break;
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ break;
+ case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ break;
+ case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+ break;
+ case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
@@ -462,34 +3114,382 @@
Register scratch1,
Register scratch2,
Label* gc_required) {
- UNIMPLEMENTED_MIPS();
+
+ // Code below will clobber result if allocation fails. To keep both
+ // arguments intact for the runtime call, result cannot be one of these.
+ ASSERT(!result.is(a0) && !result.is(a1));
+
+ if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
+ Label skip_allocation, allocated;
+ Register overwritable_operand = mode_ == OVERWRITE_LEFT ? a1 : a0;
+ // If the overwritable operand is already an object, we skip the
+ // allocation of a heap number.
+ __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
+ // Allocate a heap number for the result.
+ __ AllocateHeapNumber(
+ result, scratch1, scratch2, heap_number_map, gc_required);
+ __ Branch(&allocated);
+ __ bind(&skip_allocation);
+ // Use object holding the overwritable operand for result.
+ __ mov(result, overwritable_operand);
+ __ bind(&allocated);
+ } else {
+ ASSERT(mode_ == NO_OVERWRITE);
+ __ AllocateHeapNumber(
+ result, scratch1, scratch2, heap_number_map, gc_required);
+ }
}
void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ __ Push(a1, a0);
}
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // Untagged case: double input in f4, double result goes
+ // into f4.
+ // Tagged case: tagged input on top of stack and in a0,
+ // tagged result (heap number) goes into v0.
+
+ Label input_not_smi;
+ Label loaded;
+ Label calculate;
+ Label invalid_cache;
+ const Register scratch0 = t5;
+ const Register scratch1 = t3;
+ const Register cache_entry = a0;
+ const bool tagged = (argument_type_ == TAGGED);
+
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+
+ if (tagged) {
+ // Argument is a number and is on stack and in a0.
+ // Load argument and check if it is a smi.
+ __ JumpIfNotSmi(a0, &input_not_smi);
+
+ // Input is a smi. Convert to double and load the low and high words
+ // of the double into a2, a3.
+ __ sra(t0, a0, kSmiTagSize);
+ __ mtc1(t0, f4);
+ __ cvt_d_w(f4, f4);
+ __ mfc1(a2, f4);
+ __ mfc1(a3, f5);
+ __ Branch(&loaded);
+
+ __ bind(&input_not_smi);
+ // Check if input is a HeapNumber.
+ __ CheckMap(a0,
+ a1,
+ Heap::kHeapNumberMapRootIndex,
+ &calculate,
+ true);
+ // Input is a HeapNumber. Load the
+ // low and high words into a2, a3.
+ __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
+ __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
+ } else {
+ // Input is untagged double in f4. Output goes to f4.
+ __ mfc1(a2, f4);
+ __ mfc1(a3, f5);
+ }
+ __ bind(&loaded);
+ // a2 = low 32 bits of double value.
+ // a3 = high 32 bits of double value.
+ // Compute hash (the shifts are arithmetic):
+ // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
+ __ Xor(a1, a2, a3);
+ __ sra(t0, a1, 16);
+ __ Xor(a1, a1, t0);
+ __ sra(t0, a1, 8);
+ __ Xor(a1, a1, t0);
+ ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
+ __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
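+ // (Illustration, not generated code; the same hash in C:
+ //   uint32_t h = lo ^ hi;
+ //   h ^= (uint32_t)((int32_t)h >> 16);  // arithmetic shift, as above
+ //   h ^= (uint32_t)((int32_t)h >> 8);
+ //   h &= TranscendentalCache::SubCache::kCacheSize - 1;)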
+
+ // a2 = low 32 bits of double value.
+ // a3 = high 32 bits of double value.
+ // a1 = TranscendentalCache::hash(double value).
+ __ li(cache_entry, Operand(
+ ExternalReference::transcendental_cache_array_address(
+ masm->isolate())));
+ // a0 points to cache array.
+ __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
+ Isolate::Current()->transcendental_cache()->caches_[0])));
+ // a0 points to the cache for the type type_.
+ // If NULL, the cache hasn't been initialized yet, so go through runtime.
+ __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
+
+#ifdef DEBUG
+ // Check that the layout of cache elements matches expectations.
+ { TranscendentalCache::SubCache::Element test_elem[2];
+ char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+ char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+ char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+ char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+ char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+ CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
+ CHECK_EQ(0, elem_in0 - elem_start);
+ CHECK_EQ(kIntSize, elem_in1 - elem_start);
+ CHECK_EQ(2 * kIntSize, elem_out - elem_start);
+ }
+#endif
+
+ // Find the address of the a1'th entry in the cache, i.e., &a0[a1*12].
+ __ sll(t0, a1, 1);
+ __ Addu(a1, a1, t0);
+ __ sll(t0, a1, 2);
+ __ Addu(cache_entry, cache_entry, t0);
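+ // (Illustration: the scaling computes a1 * 12 as (a1 + (a1 << 1)) << 2,
+ // i.e. multiply by 3 and then by 4, avoiding a multiply instruction.)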
+
+ // Check if cache matches: Double value is stored in uint32_t[2] array.
+ __ lw(t0, MemOperand(cache_entry, 0));
+ __ lw(t1, MemOperand(cache_entry, 4));
+ __ lw(t2, MemOperand(cache_entry, 8));
+ __ Addu(cache_entry, cache_entry, 12);
+ __ Branch(&calculate, ne, a2, Operand(t0));
+ __ Branch(&calculate, ne, a3, Operand(t1));
+ // Cache hit. Load result, clean up and return.
+ if (tagged) {
+ // Pop input value from stack and load result into v0.
+ __ Drop(1);
+ __ mov(v0, t2);
+ } else {
+ // Load result into f4.
+ __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
+ }
+ __ Ret();
+ } // if (CpuFeatures::IsSupported(FPU))
+
+ __ bind(&calculate);
+ if (tagged) {
+ __ bind(&invalid_cache);
+ __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
+ masm->isolate()),
+ 1,
+ 1);
+ } else {
+ if (!CpuFeatures::IsSupported(FPU)) UNREACHABLE();
+ CpuFeatures::Scope scope(FPU);
+
+ Label no_update;
+ Label skip_cache;
+ const Register heap_number_map = t2;
+
+ // Call C function to calculate the result and update the cache.
+ // Register a0 holds precalculated cache entry address; preserve
+ // it on the stack and pop it into register cache_entry after the
+ // call.
+ __ push(cache_entry);
+ GenerateCallCFunction(masm, scratch0);
+ __ GetCFunctionDoubleResult(f4);
+
+ // Try to update the cache. If we cannot allocate a
+ // heap number, we return the result without updating.
+ __ pop(cache_entry);
+ __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
+ __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
+
+ __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
+ __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
+ __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
+
+ __ mov(v0, cache_entry);
+ __ Ret();
+
+ __ bind(&invalid_cache);
+ // The cache is invalid. Call runtime which will recreate the
+ // cache.
+ __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
+ __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
+ __ EnterInternalFrame();
+ __ push(a0);
+ __ CallRuntime(RuntimeFunction(), 1);
+ __ LeaveInternalFrame();
+ __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
+ __ Ret();
+
+ __ bind(&skip_cache);
+ // Call C function to calculate the result and answer directly
+ // without updating the cache.
+ GenerateCallCFunction(masm, scratch0);
+ __ GetCFunctionDoubleResult(f4);
+ __ bind(&no_update);
+
+ // We return the value in f4 without adding it to the cache, but
+ // we cause a scavenging GC so that future allocations will succeed.
+ __ EnterInternalFrame();
+
+ // Allocate an aligned object larger than a HeapNumber.
+ ASSERT(4 * kPointerSize >= HeapNumber::kSize);
+ __ li(scratch0, Operand(4 * kPointerSize));
+ __ push(scratch0);
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+ __ LeaveInternalFrame();
+ __ Ret();
+ }
+}
+
+
+void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
+ Register scratch) {
+ __ push(ra);
+ __ PrepareCallCFunction(2, scratch);
+ __ mfc1(v0, f4);
+ __ mfc1(v1, f5);
+ switch (type_) {
+ case TranscendentalCache::SIN:
+ __ CallCFunction(
+ ExternalReference::math_sin_double_function(masm->isolate()), 2);
+ break;
+ case TranscendentalCache::COS:
+ __ CallCFunction(
+ ExternalReference::math_cos_double_function(masm->isolate()), 2);
+ break;
+ case TranscendentalCache::LOG:
+ __ CallCFunction(
+ ExternalReference::math_log_double_function(masm->isolate()), 2);
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ __ pop(ra);
}
Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- UNIMPLEMENTED_MIPS();
- return Runtime::kAbort;
+ switch (type_) {
+ // Add more cases when necessary.
+ case TranscendentalCache::SIN: return Runtime::kMath_sin;
+ case TranscendentalCache::COS: return Runtime::kMath_cos;
+ case TranscendentalCache::LOG: return Runtime::kMath_log;
+ default:
+ UNIMPLEMENTED();
+ return Runtime::kAbort;
+ }
}
void StackCheckStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
}
void MathPowStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Label call_runtime;
+
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+
+ Label base_not_smi;
+ Label exponent_not_smi;
+ Label convert_exponent;
+
+ const Register base = a0;
+ const Register exponent = a2;
+ const Register heapnumbermap = t1;
+ const Register heapnumber = s0; // Callee-saved register.
+ const Register scratch = t2;
+ const Register scratch2 = t3;
+
+ // Allocate FP values in the ABI-parameter-passing regs.
+ const DoubleRegister double_base = f12;
+ const DoubleRegister double_exponent = f14;
+ const DoubleRegister double_result = f0;
+ const DoubleRegister double_scratch = f2;
+
+ __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
+ __ lw(base, MemOperand(sp, 1 * kPointerSize));
+ __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
+
+ // Convert base to a double value and store it in double_base (f12).
+ __ JumpIfNotSmi(base, &base_not_smi);
+ // Base is a Smi. Untag and convert it.
+ __ SmiUntag(base);
+ __ mtc1(base, double_scratch);
+ __ cvt_d_w(double_base, double_scratch);
+ __ Branch(&convert_exponent);
+
+ __ bind(&base_not_smi);
+ __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
+ __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
+ // Base is a heapnumber. Load it into double register.
+ __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
+
+ __ bind(&convert_exponent);
+ __ JumpIfNotSmi(exponent, &exponent_not_smi);
+ __ SmiUntag(exponent);
+
+ // The base is in a double register and the exponent is
+ // an untagged smi. Allocate a heap number and call a
+ // C function for integer exponents. The register containing
+ // the heap number is callee-saved.
+ __ AllocateHeapNumber(heapnumber,
+ scratch,
+ scratch2,
+ heapnumbermap,
+ &call_runtime);
+ __ push(ra);
+ __ PrepareCallCFunction(3, scratch);
+ // ABI (o32) for func(double d, int x): d in f12, x in a2.
+ ASSERT(double_base.is(f12));
+ ASSERT(exponent.is(a2));
+ if (IsMipsSoftFloatABI) {
+ // Simulator case, supports FPU, but with soft-float passing.
+ __ mfc1(a0, double_base);
+ __ mfc1(a1, FPURegister::from_code(double_base.code() + 1));
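+ // (o32 soft-float convention: the first double argument is passed in the
+ // a0/a1 register pair, so the two moves above copy the low and high
+ // halves of the f12/f13 pair into a0 and a1.)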
+ }
+ __ CallCFunction(
+ ExternalReference::power_double_int_function(masm->isolate()), 3);
+ __ pop(ra);
+ __ GetCFunctionDoubleResult(double_result);
+ __ sdc1(double_result,
+ FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+ __ mov(v0, heapnumber);
+ __ DropAndRet(2 * kPointerSize);
+
+ __ bind(&exponent_not_smi);
+ __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
+ __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
+ // Exponent is a heapnumber. Load it into double register.
+ __ ldc1(double_exponent,
+ FieldMemOperand(exponent, HeapNumber::kValueOffset));
+
+ // The base and the exponent are in double registers.
+ // Allocate a heap number and call a C function for
+ // double exponents. The register containing
+ // the heap number is callee-saved.
+ __ AllocateHeapNumber(heapnumber,
+ scratch,
+ scratch2,
+ heapnumbermap,
+ &call_runtime);
+ __ push(ra);
+ __ PrepareCallCFunction(4, scratch);
+ // ABI (o32) for func(double a, double b): a in f12, b in f14.
+ ASSERT(double_base.is(f12));
+ ASSERT(double_exponent.is(f14));
+ if (IsMipsSoftFloatABI) {
+ __ mfc1(a0, double_base);
+ __ mfc1(a1, FPURegister::from_code(double_base.code() + 1));
+ __ mfc1(a2, double_exponent);
+ __ mfc1(a3, FPURegister::from_code(double_exponent.code() + 1));
+ }
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()), 4);
+ __ pop(ra);
+ __ GetCFunctionDoubleResult(double_result);
+ __ sdc1(double_result,
+ FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+ __ mov(v0, heapnumber);
+ __ DropAndRet(2 * kPointerSize);
+ }
+
+ __ bind(&call_runtime);
+ __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
}
@@ -499,13 +3499,13 @@
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ __ Throw(v0);
}
void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
UncatchableExceptionType type) {
- UNIMPLEMENTED_MIPS();
+ __ ThrowUncatchable(type, v0);
}
@@ -515,17 +3515,369 @@
Label* throw_out_of_memory_exception,
bool do_gc,
bool always_allocate) {
- UNIMPLEMENTED_MIPS();
+ // v0: result parameter for PerformGC, if any
+ // s0: number of arguments including receiver (C callee-saved)
+ // s1: pointer to the first argument (C callee-saved)
+ // s2: pointer to builtin function (C callee-saved)
+
+ if (do_gc) {
+ // Move result passed in v0 into a0 to call PerformGC.
+ __ mov(a0, v0);
+ __ PrepareCallCFunction(1, a1);
+ __ CallCFunction(
+ ExternalReference::perform_gc_function(masm->isolate()), 1);
+ }
+
+ ExternalReference scope_depth =
+ ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
+ if (always_allocate) {
+ __ li(a0, Operand(scope_depth));
+ __ lw(a1, MemOperand(a0));
+ __ Addu(a1, a1, Operand(1));
+ __ sw(a1, MemOperand(a0));
+ }
+
+ // Prepare arguments for C routine: a0 = argc, a1 = argv
+ __ mov(a0, s0);
+ __ mov(a1, s1);
+
+ // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
+ // also need to reserve the 4 argument slots on the stack.
+
+ __ AssertStackIsAligned();
+
+ __ li(a2, Operand(ExternalReference::isolate_address()));
+
+ // From arm version of this function:
+ // TODO(1242173): To let the GC traverse the return address of the exit
+ // frames, we need to know where the return address is. Right now,
+ // we push it on the stack to be able to find it again, but we never
+ // restore from it in case of changes, which makes it impossible to
+ // support moving the C entry code stub. This should be fixed, but currently
+ // this is OK because the CEntryStub gets generated so early in the V8 boot
+ // sequence that it is not moving ever.
+
+ { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
+ // This branch-and-link sequence is needed to find the current PC on mips,
+ // saved to the ra register.
+ // Use masm-> here instead of the double-underscore macro since extra
+ // coverage code can interfere with the proper calculation of ra.
+ Label find_ra;
+ masm->bal(&find_ra); // bal exposes branch delay slot.
+ masm->nop(); // Branch delay slot nop.
+ masm->bind(&find_ra);
+
+ // Adjust the value in ra to point to the correct return location, 2nd
+ // instruction past the real call into C code (the jalr(t9)), and push it.
+ // This is the return address of the exit frame.
+ const int kNumInstructionsToJump = 6;
+ masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
+ masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
+ masm->Subu(sp, sp, StandardFrameConstants::kCArgsSlotsSize);
+ // Stack is still aligned.
+
+ // Call the C routine.
+ masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
+ masm->jalr(t9);
+ masm->nop(); // Branch delay slot nop.
+ // Make sure the stored 'ra' points to this position.
+ ASSERT_EQ(kNumInstructionsToJump,
+ masm->InstructionsGeneratedSince(&find_ra));
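+ // (Illustration: bal sets ra to the address of find_ra, and the six
+ // 4-byte instructions that follow (Addu, sw, Subu, mov, jalr, nop) make
+ // ra + 6 * kPointerSize the address just past the call's delay slot,
+ // which is where the C function will return.)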
+ }
+
+ // Restore stack (remove arg slots).
+ __ Addu(sp, sp, StandardFrameConstants::kCArgsSlotsSize);
+
+ if (always_allocate) {
+ // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
+ __ li(a2, Operand(scope_depth));
+ __ lw(a3, MemOperand(a2));
+ __ Subu(a3, a3, Operand(1));
+ __ sw(a3, MemOperand(a2));
+ }
+
+ // Check for failure result.
+ Label failure_returned;
+ STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+ __ addiu(a2, v0, 1);
+ __ andi(t0, a2, kFailureTagMask);
+ __ Branch(&failure_returned, eq, t0, Operand(zero_reg));
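+ // (Illustration: a Failure object has all of its low kFailureTagSize bits
+ // set, so v0 + 1 has them all clear; "(v0 + 1) & kFailureTagMask == 0" is
+ // therefore a cheap test for "v0 is a Failure".)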
+
+ // Exit C frame and return.
+ // v0:v1: result
+ // sp: stack pointer
+ // fp: frame pointer
+ __ LeaveExitFrame(save_doubles_, s0);
+ __ Ret();
+
+ // Check if we should retry or throw exception.
+ Label retry;
+ __ bind(&failure_returned);
+ STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
+ __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
+ __ Branch(&retry, eq, t0, Operand(zero_reg));
+
+ // Special handling of out of memory exceptions.
+ Failure* out_of_memory = Failure::OutOfMemoryException();
+ __ Branch(throw_out_of_memory_exception, eq,
+ v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+
+ // Retrieve the pending exception and clear the variable.
+ __ li(t0,
+ Operand(ExternalReference::the_hole_value_location(masm->isolate())));
+ __ lw(a3, MemOperand(t0));
+ __ li(t0, Operand(ExternalReference(Isolate::k_pending_exception_address,
+ masm->isolate())));
+ __ lw(v0, MemOperand(t0));
+ __ sw(a3, MemOperand(t0));
+
+ // Special handling of termination exceptions which are uncatchable
+ // by javascript code.
+ __ Branch(throw_termination_exception, eq,
+ v0, Operand(masm->isolate()->factory()->termination_exception()));
+
+ // Handle normal exception.
+ __ jmp(throw_normal_exception);
+
+ __ bind(&retry);
+ // On retry, the last failure (v0) will be moved to a0 as the parameter.
}
void CEntryStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // Called from JavaScript; parameters are on stack as if calling JS function
+ // a0: number of arguments including receiver
+ // a1: pointer to builtin function
+ // fp: frame pointer (restored after C call)
+ // sp: stack pointer (restored as callee's sp after C call)
+ // cp: current context (C callee-saved)
+
+ // NOTE: Invocations of builtins may return failure objects
+ // instead of a proper result. The builtin entry handles
+ // this by performing a garbage collection and retrying the
+ // builtin once.
+
+ // Compute the argv pointer in a callee-saved register.
+ __ sll(s1, a0, kPointerSizeLog2);
+ __ Addu(s1, sp, s1);
+ __ Subu(s1, s1, Operand(kPointerSize));
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ __ EnterExitFrame(save_doubles_);
+
+ // Set up argc and the builtin function in callee-saved registers.
+ __ mov(s0, a0);
+ __ mov(s2, a1);
+
+ // s0: number of arguments (C callee-saved)
+ // s1: pointer to first argument (C callee-saved)
+ // s2: pointer to builtin function (C callee-saved)
+
+ Label throw_normal_exception;
+ Label throw_termination_exception;
+ Label throw_out_of_memory_exception;
+
+ // Call into the runtime system.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ false,
+ false);
+
+ // Do space-specific GC and retry runtime call.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ false);
+
+ // Do full GC and retry runtime call one final time.
+ Failure* failure = Failure::InternalError();
+ __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ true);
+
+ __ bind(&throw_out_of_memory_exception);
+ GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+
+ __ bind(&throw_termination_exception);
+ GenerateThrowUncatchable(masm, TERMINATION);
+
+ __ bind(&throw_normal_exception);
+ GenerateThrowTOS(masm);
}
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- UNIMPLEMENTED_MIPS();
+ Label invoke, exit;
+
+ // Registers:
+ // a0: entry address
+ // a1: function
+ // a2: receiver
+ // a3: argc
+ //
+ // Stack:
+ // 4 args slots
+ // args
+
+ // Save callee saved registers on the stack.
+ __ MultiPush((kCalleeSaved | ra.bit()) & ~sp.bit());
+
+ // Load argv in s0 register.
+ __ lw(s0, MemOperand(sp, kNumCalleeSaved * kPointerSize +
+ StandardFrameConstants::kCArgsSlotsSize));
+
+ // We build an EntryFrame.
+ __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ __ li(t2, Operand(Smi::FromInt(marker)));
+ __ li(t1, Operand(Smi::FromInt(marker)));
+ __ li(t0, Operand(ExternalReference(Isolate::k_c_entry_fp_address,
+ masm->isolate())));
+ __ lw(t0, MemOperand(t0));
+ __ Push(t3, t2, t1, t0);
+ // Set up the frame pointer for the frame to be pushed.
+ __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+ // Registers:
+ // a0: entry_address
+ // a1: function
+ // a2: receiver_pointer
+ // a3: argc
+ // s0: argv
+ //
+ // Stack:
+ // caller fp |
+ // function slot | entry frame
+ // context slot |
+ // bad fp (0xff...f) |
+ // callee saved registers + ra
+ // 4 args slots
+ // args
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // If this is the outermost JS call, set js_entry_sp value.
+ ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address,
+ masm->isolate());
+ __ li(t1, Operand(ExternalReference(js_entry_sp)));
+ __ lw(t2, MemOperand(t1));
+ {
+ Label skip;
+ __ Branch(&skip, ne, t2, Operand(zero_reg));
+ __ sw(fp, MemOperand(t1));
+ __ bind(&skip);
+ }
+#endif
+
+ // Call a faked try-block that does the invoke.
+ __ bal(&invoke); // bal exposes branch delay slot.
+ __ nop(); // Branch delay slot nop.
+
+ // Caught exception: Store result (exception) in the pending
+ // exception field in the JSEnv and return a failure sentinel.
+ // Coming in here the fp will be invalid because the PushTryHandler below
+ // sets it to 0 to signal the existence of the JSEntry frame.
+ __ li(t0, Operand(ExternalReference(Isolate::k_pending_exception_address,
+ masm->isolate())));
+ __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
+ __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
+ __ b(&exit); // b exposes branch delay slot.
+ __ nop(); // Branch delay slot nop.
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+ // If an exception not caught by another handler occurs, this handler
+ // returns control to the code after the bal(&invoke) above, which
+ // restores all kCalleeSaved registers (including cp and fp) to their
+ // saved values before returning a failure to C.
+
+ // Clear any pending exceptions.
+ __ li(t0,
+ Operand(ExternalReference::the_hole_value_location(masm->isolate())));
+ __ lw(t1, MemOperand(t0));
+ __ li(t0, Operand(ExternalReference(Isolate::k_pending_exception_address,
+ masm->isolate())));
+ __ sw(t1, MemOperand(t0));
+
+ // Invoke the function by calling through JS entry trampoline builtin.
+ // Notice that we cannot store a reference to the trampoline code directly in
+ // this stub, because runtime stubs are not traversed when doing GC.
+
+ // Registers:
+ // a0: entry_address
+ // a1: function
+ // a2: receiver_pointer
+ // a3: argc
+ // s0: argv
+ //
+ // Stack:
+ // handler frame
+ // entry frame
+ // callee saved registers + ra
+ // 4 args slots
+ // args
+
+ if (is_construct) {
+ ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
+ masm->isolate());
+ __ li(t0, Operand(construct_entry));
+ } else {
+ ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
+ __ li(t0, Operand(entry));
+ }
+ __ lw(t9, MemOperand(t0)); // Deref address.
+
+ // Call JSEntryTrampoline.
+ __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
+ __ Call(t9);
+
+ // Unlink this frame from the handler chain. When reading the
+ // address of the next handler, there is no need to use the address
+ // displacement since the current stack pointer (sp) points directly
+ // to the stack handler.
+ __ lw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset));
+ __ li(t0, Operand(ExternalReference(Isolate::k_handler_address,
+ masm->isolate())));
+ __ sw(t1, MemOperand(t0));
+
+ // This restores sp to its position before PushTryHandler.
+ __ addiu(sp, sp, StackHandlerConstants::kSize);
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // If current FP value is the same as js_entry_sp value, it means that
+ // the current function is the outermost.
+ __ li(t1, Operand(ExternalReference(js_entry_sp)));
+ __ lw(t2, MemOperand(t1));
+ {
+ Label skip;
+ __ Branch(&skip, ne, fp, Operand(t2));
+ __ sw(zero_reg, MemOperand(t1));
+ __ bind(&skip);
+ }
+#endif
+
+ __ bind(&exit); // v0 holds result.
+ // Restore the top frame descriptors from the stack.
+ __ pop(t1);
+ __ li(t0, Operand(ExternalReference(Isolate::k_c_entry_fp_address,
+ masm->isolate())));
+ __ sw(t1, MemOperand(t0));
+
+ // Reset the stack to the callee saved registers.
+ __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+ // Restore callee saved registers from the stack.
+ __ MultiPop((kCalleeSaved | ra.bit()) & ~sp.bit());
+ // Return.
+ __ Jump(ra);
}
@@ -534,58 +3886,1008 @@
// a1 (or at sp), depending on whether or not
// args_in_registers() is true.
void InstanceofStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // Fixed register usage throughout the stub:
+ const Register object = a0; // Object (lhs).
+ const Register map = a3; // Map of the object.
+ const Register function = a1; // Function (rhs).
+ const Register prototype = t0; // Prototype of the function.
+ const Register scratch = a2;
+ Label slow, loop, is_instance, is_not_instance, not_js_object;
+ if (!HasArgsInRegisters()) {
+ __ lw(object, MemOperand(sp, 1 * kPointerSize));
+ __ lw(function, MemOperand(sp, 0));
+ }
+
+ // Check that the left-hand side is a JS object and load its map.
+ __ JumpIfSmi(object, &not_js_object);
+ __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
+
+ // Look up the function and the map in the instanceof cache.
+ Label miss;
+ __ LoadRoot(t1, Heap::kInstanceofCacheFunctionRootIndex);
+ __ Branch(&miss, ne, function, Operand(t1));
+ __ LoadRoot(t1, Heap::kInstanceofCacheMapRootIndex);
+ __ Branch(&miss, ne, map, Operand(t1));
+ __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+ __ bind(&miss);
+ __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
+
+ // Check that the function prototype is a JS object.
+ __ JumpIfSmi(prototype, &slow);
+ __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
+
+ __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
+
+ // Register mapping: a3 is object map and t0 is function prototype.
+ // Get prototype of object into a2.
+ __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
+
+ // Loop through the prototype chain looking for the function prototype.
+ __ bind(&loop);
+ __ Branch(&is_instance, eq, scratch, Operand(prototype));
+ __ LoadRoot(t1, Heap::kNullValueRootIndex);
+ __ Branch(&is_not_instance, eq, scratch, Operand(t1));
+ __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+ __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
+ __ Branch(&loop);
+
+ __ bind(&is_instance);
+ ASSERT(Smi::FromInt(0) == 0);
+ __ mov(v0, zero_reg);
+ __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+ __ bind(&is_not_instance);
+ __ li(v0, Operand(Smi::FromInt(1)));
+ __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+ Label object_not_null, object_not_null_or_smi;
+ __ bind(&not_js_object);
+ // Before the null, smi and string value checks, check that the rhs is a
+ // function; for a non-function rhs an exception needs to be thrown.
+ __ JumpIfSmi(function, &slow);
+ __ GetObjectType(function, map, scratch);
+ __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
+
+ // Null is not an instance of anything.
+ __ Branch(&object_not_null, ne, scratch,
+ Operand(masm->isolate()->factory()->null_value()));
+ __ li(v0, Operand(Smi::FromInt(1)));
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+ __ bind(&object_not_null);
+ // Smi values are not instances of anything.
+ __ JumpIfNotSmi(object, &object_not_null_or_smi);
+ __ li(v0, Operand(Smi::FromInt(1)));
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+ __ bind(&object_not_null_or_smi);
+ // String values are not instances of anything.
+ __ IsObjectJSStringType(object, scratch, &slow);
+ __ li(v0, Operand(Smi::FromInt(1)));
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+ // Slow-case. Tail call builtin.
+ __ bind(&slow);
+ if (HasArgsInRegisters()) {
+ __ Push(a0, a1);
+ }
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
}
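
For reference, the prototype-chain walk the stub emits between the 'loop',
'is_instance' and 'is_not_instance' labels corresponds to roughly the C++
sketch below (simplified stand-in types; the instanceof cache is left out).
Note the stub's answer convention: Smi 0 means "is an instance", Smi 1 means
"is not".

    struct HeapValue {
      HeapValue* prototype;  // What Map::kPrototypeOffset reaches via the map.
      bool is_null;          // True only for the null sentinel.
    };

    // Walk the chain exactly as the loop above does: stop on a match
    // (is_instance, answer 0) or on null (is_not_instance, answer 1).
    int InstanceOf(HeapValue* object, HeapValue* function_prototype) {
      for (HeapValue* p = object->prototype; !p->is_null; p = p->prototype) {
        if (p == function_prototype) return 0;
      }
      return 1;
    }
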
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // The displacement is the offset of the last parameter (if any)
+ // relative to the frame pointer.
+ static const int kDisplacement =
+ StandardFrameConstants::kCallerSPOffset - kPointerSize;
+
+ // Check that the key is a smi.
+ Label slow;
+ __ JumpIfNotSmi(a1, &slow);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor;
+ __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+ __ Branch(&adaptor,
+ eq,
+ a3,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Check index (a1) against formal parameters count limit passed in
+ // through register a0. Use unsigned comparison to get negative
+ // check for free.
+ __ Branch(&slow, hs, a1, Operand(a0));
+
+ // Read the argument from the stack and return it.
+ __ subu(a3, a0, a1);
+ __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(a3, fp, Operand(t3));
+ __ lw(v0, MemOperand(a3, kDisplacement));
+ __ Ret();
+
+ // Arguments adaptor case: Check index (a1) against actual arguments
+ // limit found in the arguments adaptor frame. Use unsigned
+ // comparison to get negative check for free.
+ __ bind(&adaptor);
+ __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
+
+ // Read the argument from the adaptor frame and return it.
+ __ subu(a3, a0, a1);
+ __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(a3, a2, Operand(t3));
+ __ lw(v0, MemOperand(a3, kDisplacement));
+ __ Ret();
+
+ // Slow-case: Handle non-smi or out-of-bounds access to arguments
+ // by calling the runtime system.
+ __ bind(&slow);
+ __ push(a1);
+ __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
}
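
The address arithmetic on both fast paths above reduces to the sketch below
(untagged counts; ArgumentAddress is a hypothetical name, kDisplacement as
defined at the top of the function):

    #include <cstdint>

    const int kPointerSize = 4;  // MIPS32 word size.

    // Matches 'subu a3, a0, a1' plus the scaled add and the final load at
    // kDisplacement: argument i sits above fp, below the formal parameters.
    uintptr_t ArgumentAddress(uintptr_t fp, int displacement,
                              int parameter_count, int index) {
      return fp + (parameter_count - index) * kPointerSize + displacement;
    }
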
void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // sp[0] : number of parameters
+ // sp[4] : receiver displacement
+ // sp[8] : function
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+ __ Branch(&adaptor_frame,
+ eq,
+ a3,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Get the length from the frame.
+ __ lw(a1, MemOperand(sp, 0));
+ __ Branch(&try_allocate);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ sw(a1, MemOperand(sp, 0));
+ __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(a3, a2, Operand(at));
+
+ __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ sw(a3, MemOperand(sp, 1 * kPointerSize));
+
+ // Try the new space allocation. Start out with computing the size
+ // of the arguments object and the elements array in words.
+ Label add_arguments_object;
+ __ bind(&try_allocate);
+ __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
+ __ srl(a1, a1, kSmiTagSize);
+
+ __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
+ __ bind(&add_arguments_object);
+ __ Addu(a1, a1, Operand(GetArgumentsObjectSize() / kPointerSize));
+
+ // Do the allocation of both objects in one go.
+ __ AllocateInNewSpace(
+ a1,
+ v0,
+ a2,
+ a3,
+ &runtime,
+ static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+
+ // Get the arguments boilerplate from the current (global) context.
+ __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
+ __ lw(t0, MemOperand(t0,
+ Context::SlotOffset(GetArgumentsBoilerplateIndex())));
+
+ // Copy the JS object part.
+ __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
+
+ if (type_ == NEW_NON_STRICT) {
+ // Setup the callee in-object property.
+ STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+ __ lw(a3, MemOperand(sp, 2 * kPointerSize));
+ const int kCalleeOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsCalleeIndex * kPointerSize;
+ __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
+ }
+
+ // Get the length (smi tagged) and set that as an in-object property too.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ __ lw(a1, MemOperand(sp, 0 * kPointerSize));
+ __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize));
+
+ Label done;
+ __ Branch(&done, eq, a1, Operand(zero_reg));
+
+ // Get the parameters pointer from the stack.
+ __ lw(a2, MemOperand(sp, 1 * kPointerSize));
+
+ // Set up the elements pointer in the allocated arguments object and
+ // initialize the header in the elements fixed array.
+ __ Addu(t0, v0, Operand(GetArgumentsObjectSize()));
+ __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
+ __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
+ __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
+ __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
+ __ srl(a1, a1, kSmiTagSize); // Untag the length for the loop.
+
+ // Copy the fixed array slots.
+ Label loop;
+ // Set up t0 to point to the first array slot.
+ __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ bind(&loop);
+ // Pre-decrement a2 with kPointerSize on each iteration.
+ // Pre-decrement in order to skip receiver.
+ __ Addu(a2, a2, Operand(-kPointerSize));
+ __ lw(a3, MemOperand(a2));
+ // Post-increment t0 with kPointerSize on each iteration.
+ __ sw(a3, MemOperand(t0));
+ __ Addu(t0, t0, Operand(kPointerSize));
+ __ Subu(a1, a1, Operand(1));
+ __ Branch(&loop, ne, a1, Operand(zero_reg));
+
+ // Return and remove the on-stack parameters.
+ __ bind(&done);
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}
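
In words, the size computed on the try_allocate path is the sketch below
(hypothetical helper; the 2-word FixedArray header, map plus length, is an
assumption matching FixedArray::kHeaderSize on 32-bit targets):

    // Arguments object plus, when argc > 0, the trailing elements
    // FixedArray (header plus one slot per argument).
    int ArgumentsSizeInWords(int argc, int arguments_object_size_in_bytes) {
      const int kPointerSize = 4;
      const int kFixedArrayHeaderWords = 2;  // Assumed: map + length.
      int words = (argc > 0) ? argc + kFixedArrayHeaderWords : 0;
      return words + arguments_object_size_in_bytes / kPointerSize;
    }
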
void RegExpExecStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // Just jump directly to the runtime if native RegExp is not selected at
+ // compile time, or if the regexp entry in generated code has been turned
+ // off by the runtime flag.
+#ifdef V8_INTERPRETED_REGEXP
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#else // V8_INTERPRETED_REGEXP
+ if (!FLAG_regexp_entry_native) {
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ return;
+ }
+
+ // Stack frame on entry.
+ // sp[0]: last_match_info (expected JSArray)
+ // sp[4]: previous index
+ // sp[8]: subject string
+ // sp[12]: JSRegExp object
+
+ static const int kLastMatchInfoOffset = 0 * kPointerSize;
+ static const int kPreviousIndexOffset = 1 * kPointerSize;
+ static const int kSubjectOffset = 2 * kPointerSize;
+ static const int kJSRegExpOffset = 3 * kPointerSize;
+
+ Label runtime, invoke_regexp;
+
+ // Allocation of registers for this function. These are in callee-saved
+ // registers and will be preserved by the call to the native RegExp code, as
+ // this code is called using the normal C calling convention. When calling
+ // directly from generated code the native RegExp code will not do a GC and
+ // therefore the contents of these registers are safe to use after the call.
+ // MIPS - using s0..s2, since we are not using CEntry Stub.
+ Register subject = s0;
+ Register regexp_data = s1;
+ Register last_match_info_elements = s2;
+
+ // Ensure that a RegExp stack is allocated.
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address(
+ masm->isolate());
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
+ __ li(a0, Operand(address_of_regexp_stack_memory_size));
+ __ lw(a0, MemOperand(a0, 0));
+ __ Branch(&runtime, eq, a0, Operand(zero_reg));
+
+ // Check that the first argument is a JSRegExp object.
+ __ lw(a0, MemOperand(sp, kJSRegExpOffset));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(a0, &runtime);
+ __ GetObjectType(a0, a1, a1);
+ __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
+
+ // Check that the RegExp has been compiled (data contains a fixed array).
+ __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
+ if (FLAG_debug_code) {
+ __ And(t0, regexp_data, Operand(kSmiTagMask));
+ __ Check(nz,
+ "Unexpected type for RegExp data, FixedArray expected",
+ t0,
+ Operand(zero_reg));
+ __ GetObjectType(regexp_data, a0, a0);
+ __ Check(eq,
+ "Unexpected type for RegExp data, FixedArray expected",
+ a0,
+ Operand(FIXED_ARRAY_TYPE));
+ }
+
+ // regexp_data: RegExp data (FixedArray)
+ // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+ __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
+ __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
+
+ // regexp_data: RegExp data (FixedArray)
+ // Check that the number of captures fits in the static offsets vector
+ // buffer.
+ __ lw(a2,
+ FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2. This
+ // uses the assumption that smis are 2 * their untagged value.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+ __ Addu(a2, a2, Operand(2)); // a2 was a smi.
+ // Check that the static offsets vector buffer is large enough.
+ __ Branch(&runtime, hi, a2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
+
+ // a2: Number of capture registers
+ // regexp_data: RegExp data (FixedArray)
+ // Check that the second argument is a string.
+ __ lw(subject, MemOperand(sp, kSubjectOffset));
+ __ JumpIfSmi(subject, &runtime);
+ __ GetObjectType(subject, a0, a0);
+ __ And(a0, a0, Operand(kIsNotStringMask));
+ STATIC_ASSERT(kStringTag == 0);
+ __ Branch(&runtime, ne, a0, Operand(zero_reg));
+
+ // Get the length of the string into a3.
+ __ lw(a3, FieldMemOperand(subject, String::kLengthOffset));
+
+ // a2: Number of capture registers
+ // a3: Length of subject string as a smi
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // Check that the third argument is a positive smi less than the subject
+ // string length. A negative value will be greater (unsigned comparison).
+ __ lw(a0, MemOperand(sp, kPreviousIndexOffset));
+ __ And(at, a0, Operand(kSmiTagMask));
+ __ Branch(&runtime, ne, at, Operand(zero_reg));
+ __ Branch(&runtime, ls, a3, Operand(a0));
+
+ // a2: Number of capture registers
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // Check that the fourth object is a JSArray object.
+ __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
+ __ JumpIfSmi(a0, &runtime);
+ __ GetObjectType(a0, a1, a1);
+ __ Branch(&runtime, ne, a1, Operand(JS_ARRAY_TYPE));
+ // Check that the JSArray is in fast case.
+ __ lw(last_match_info_elements,
+ FieldMemOperand(a0, JSArray::kElementsOffset));
+ __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
+ __ Branch(&runtime, ne, a0, Operand(
+ masm->isolate()->factory()->fixed_array_map()));
+ // Check that the last match info has space for the capture registers and the
+ // additional information.
+ __ lw(a0,
+ FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
+ __ Addu(a2, a2, Operand(RegExpImpl::kLastMatchOverhead));
+ __ sra(at, a0, kSmiTagSize); // Untag length for comparison.
+ __ Branch(&runtime, gt, a2, Operand(at));
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // Check the representation and encoding of the subject string.
+ Label seq_string;
+ __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
+ // First check for flat string.
+ __ And(at, a0, Operand(kIsNotStringMask | kStringRepresentationMask));
+ STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
+ __ Branch(&seq_string, eq, at, Operand(zero_reg));
+
+ // subject: Subject string
+ // a0: instance type of subject string
+ // regexp_data: RegExp data (FixedArray)
+ // Check for flat cons string.
+ // A flat cons string is a cons string where the second part is the empty
+ // string. In that case the subject string is just the first part of the cons
+ // string. Also in this case the first part of the cons string is known to be
+ // a sequential string or an external string.
+ STATIC_ASSERT(kExternalStringTag != 0);
+ STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
+ __ And(at, a0, Operand(kIsNotStringMask | kExternalStringTag));
+ __ Branch(&runtime, ne, at, Operand(zero_reg));
+ __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
+ __ LoadRoot(a1, Heap::kEmptyStringRootIndex);
+ __ Branch(&runtime, ne, a0, Operand(a1));
+ __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+ __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
+ // Is first part a flat string?
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ And(at, a0, Operand(kStringRepresentationMask));
+ __ Branch(&runtime, ne, at, Operand(zero_reg));
+
+ __ bind(&seq_string);
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // a0: Instance type of subject string
+ STATIC_ASSERT(kStringEncodingMask == 4);
+ STATIC_ASSERT(kAsciiStringTag == 4);
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ // Find the code object based on the assumptions above.
+ __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ascii.
+ __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
+ __ sra(a3, a0, 2); // a3 is 1 for ascii, 0 for UC16 (used below).
+ __ lw(t0, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
+ __ movz(t9, t0, a0); // If UC16 (a0 is 0), replace t9 with t0 (UC16 code).
+
+ // Check that the irregexp code has been generated for the actual string
+ // encoding. If it has, the field contains a code object; otherwise it
+ // contains the hole.
+ __ GetObjectType(t9, a0, a0);
+ __ Branch(&runtime, ne, a0, Operand(CODE_TYPE));
+
+ // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
+ // t9: code
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // Load used arguments before starting to push arguments for the call to
+ // native RegExp code, to avoid handling a changing stack height.
+ __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
+ __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
+
+ // a1: previous index
+ // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
+ // t9: code
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // All checks done. Now push arguments for native regexp code.
+ __ IncrementCounter(masm->isolate()->counters()->regexp_entry_native(),
+ 1, a0, a2);
+
+ // Isolates: note we add an additional parameter here (isolate pointer).
+ static const int kRegExpExecuteArguments = 8;
+ static const int kParameterRegisters = 4;
+ __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
+
+ // Stack pointer now points to cell where return address is to be written.
+ // Arguments are before that on the stack or in registers, meaning we
+ // treat the return address as argument 5. Thus every argument after that
+ // needs to be shifted back by 1. Since DirectCEntryStub will handle
+ // allocating space for the c argument slots, we don't need to calculate
+ // that into the argument positions on the stack. This is how the stack will
+ // look (sp meaning the value of sp at this moment):
+ // [sp + 4] - Argument 8
+ // [sp + 3] - Argument 7
+ // [sp + 2] - Argument 6
+ // [sp + 1] - Argument 5
+ // [sp + 0] - saved ra
+
+ // Argument 8: Pass current isolate address.
+ // CFunctionArgumentOperand handles MIPS stack argument slots.
+ __ li(a0, Operand(ExternalReference::isolate_address()));
+ __ sw(a0, MemOperand(sp, 4 * kPointerSize));
+
+ // Argument 7: Indicate that this is a direct call from JavaScript.
+ __ li(a0, Operand(1));
+ __ sw(a0, MemOperand(sp, 3 * kPointerSize));
+
+ // Argument 6: Start (high end) of backtracking stack memory area.
+ __ li(a0, Operand(address_of_regexp_stack_memory_address));
+ __ lw(a0, MemOperand(a0, 0));
+ __ li(a2, Operand(address_of_regexp_stack_memory_size));
+ __ lw(a2, MemOperand(a2, 0));
+ __ addu(a0, a0, a2);
+ __ sw(a0, MemOperand(sp, 2 * kPointerSize));
+
+ // Argument 5: static offsets vector buffer.
+ __ li(a0, Operand(
+ ExternalReference::address_of_static_offsets_vector(masm->isolate())));
+ __ sw(a0, MemOperand(sp, 1 * kPointerSize));
+
+ // For arguments 4 and 3 get string length, calculate start of string data
+ // and calculate the shift of the index (0 for ASCII and 1 for two byte).
+ __ lw(a0, FieldMemOperand(subject, String::kLengthOffset));
+ __ sra(a0, a0, kSmiTagSize);
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ Addu(t0, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
+ // Argument 4 (a3): End of string data
+ // Argument 3 (a2): Start of string data
+ __ sllv(t1, a1, a3);
+ __ addu(a2, t0, t1);
+ __ sllv(t1, a0, a3);
+ __ addu(a3, t0, t1);
+
+ // Argument 2 (a1): Previous index.
+ // Already there
+
+ // Argument 1 (a0): Subject string.
+ __ mov(a0, subject);
+
+ // Locate the code entry and call it.
+ __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
+ DirectCEntryStub stub;
+ stub.GenerateCall(masm, t9);
+
+ __ LeaveExitFrame(false, no_reg);
+
+ // v0: result
+ // subject: subject string (callee saved)
+ // regexp_data: RegExp data (callee saved)
+ // last_match_info_elements: Last match info elements (callee saved)
+
+ // Check the result.
+
+ Label success;
+ __ Branch(&success, eq, v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
+ Label failure;
+ __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
+ // If not an exception, it can only be a retry. Handle that in the runtime
+ // system.
+ __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
+ // Result must now be an exception. If there is no pending exception already,
+ // a stack overflow (on the backtrack stack) was detected in RegExp code, but
+ // the exception has not been created yet. Handle that in the runtime system.
+ // TODO(592): Rerunning the RegExp to get the stack overflow exception.
+ __ li(a1, Operand(
+ ExternalReference::the_hole_value_location(masm->isolate())));
+ __ lw(a1, MemOperand(a1, 0));
+ __ li(a2, Operand(ExternalReference(Isolate::k_pending_exception_address,
+ masm->isolate())));
+ __ lw(v0, MemOperand(a2, 0));
+ __ Branch(&runtime, eq, v0, Operand(a1));
+
+ __ sw(a1, MemOperand(a2, 0)); // Clear pending exception.
+
+ // Check if the exception is a termination. If so, throw as uncatchable.
+ __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
+ Label termination_exception;
+ __ Branch(&termination_exception, eq, v0, Operand(a0));
+
+ __ Throw(a0); // Expects thrown value in v0.
+
+ __ bind(&termination_exception);
+ __ ThrowUncatchable(TERMINATION, v0); // Expects thrown value in v0.
+
+ __ bind(&failure);
+ // For failure and exception return null.
+ __ li(v0, Operand(masm->isolate()->factory()->null_value()));
+ __ Addu(sp, sp, Operand(4 * kPointerSize));
+ __ Ret();
+
+ // Process the result from the native regexp code.
+ __ bind(&success);
+ __ lw(a1,
+ FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+ __ Addu(a1, a1, Operand(2)); // a1 was a smi.
+
+ // a1: number of capture registers
+ // subject: subject string
+ // Store the capture count.
+ __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
+ __ sw(a2, FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastCaptureCountOffset));
+ // Store last subject and last input.
+ __ mov(a3, last_match_info_elements); // Moved up to reduce latency.
+ __ sw(subject,
+ FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastSubjectOffset));
+ __ RecordWrite(a3, Operand(RegExpImpl::kLastSubjectOffset), a2, t0);
+ __ sw(subject,
+ FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastInputOffset));
+ __ mov(a3, last_match_info_elements);
+ __ RecordWrite(a3, Operand(RegExpImpl::kLastInputOffset), a2, t0);
+
+ // Get the static offsets vector filled by the native regexp code.
+ ExternalReference address_of_static_offsets_vector =
+ ExternalReference::address_of_static_offsets_vector(masm->isolate());
+ __ li(a2, Operand(address_of_static_offsets_vector));
+
+ // a1: number of capture registers
+ // a2: offsets vector
+ Label next_capture, done;
+ // Capture register counter starts from number of capture registers and
+ // counts down until wrapping after zero.
+ __ Addu(a0,
+ last_match_info_elements,
+ Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
+ __ bind(&next_capture);
+ __ Subu(a1, a1, Operand(1));
+ __ Branch(&done, lt, a1, Operand(zero_reg));
+ // Read the value from the static offsets vector buffer.
+ __ lw(a3, MemOperand(a2, 0));
+ __ addiu(a2, a2, kPointerSize);
+ // Store the smi value in the last match info.
+ __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
+ __ sw(a3, MemOperand(a0, 0));
+ __ Branch(&next_capture, USE_DELAY_SLOT);
+ __ addiu(a0, a0, kPointerSize); // In branch delay slot.
+
+ __ bind(&done);
+
+ // Return last match info.
+ __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
+ __ Addu(sp, sp, Operand(4 * kPointerSize));
+ __ Ret();
+
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#endif // V8_INTERPRETED_REGEXP
}
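
The eight arguments assembled above amount to a native call with roughly the
shape sketched below (hedged: the real entry code is produced by
NativeRegExpMacroAssembler and its exact types differ). Arguments 1-4 travel
in a0-a3, arguments 5-8 in the stack slots written above:

    typedef int (*RegExpEntryPoint)(
        void* subject,            // Argument 1 (a0): subject string object.
        int previous_index,       // Argument 2 (a1): untagged start index.
        const void* input_start,  // Argument 3 (a2): start of character data.
        const void* input_end,    // Argument 4 (a3): end of character data.
        int* offsets_vector,      // Argument 5: static offsets vector buffer.
        void* stack_high,         // Argument 6: top of backtrack stack memory.
        int direct_call,          // Argument 7: 1 when called from JS code.
        void* isolate);           // Argument 8: the current isolate.
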
void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ const int kMaxInlineLength = 100;
+ Label slowcase;
+ Label done;
+ __ lw(a1, MemOperand(sp, kPointerSize * 2));
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ __ JumpIfNotSmi(a1, &slowcase);
+ __ Branch(&slowcase, hi, a1, Operand(Smi::FromInt(kMaxInlineLength)));
+ // Smi-tagging is equivalent to multiplying by 2.
+ // Allocate RegExpResult followed by FixedArray with size in a2.
+ // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
+ // Elements: [Map][Length][..elements..]
+ // Size of JSArray with two in-object properties and the header of a
+ // FixedArray.
+ int objects_size =
+ (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
+ __ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
+ __ Addu(a2, t1, Operand(objects_size));
+ __ AllocateInNewSpace(
+ a2, // In: Size, in words.
+ v0, // Out: Start of allocation (tagged).
+ a3, // Scratch register.
+ t0, // Scratch register.
+ &slowcase,
+ static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+ // v0: Start of allocated area, object-tagged.
+ // a1: Number of elements in array, as smi.
+ // t1: Number of elements, untagged.
+
+ // Set JSArray map to global.regexp_result_map().
+ // Set empty properties FixedArray.
+ // Set elements to point to FixedArray allocated right after the JSArray.
+ // Interleave operations for better latency.
+ __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ Addu(a3, v0, Operand(JSRegExpResult::kSize));
+ __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
+ __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+ __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
+ __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+
+ // Set input, index and length fields from arguments.
+ __ lw(a1, MemOperand(sp, kPointerSize * 0));
+ __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
+ __ lw(a1, MemOperand(sp, kPointerSize * 1));
+ __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
+ __ lw(a1, MemOperand(sp, kPointerSize * 2));
+ __ sw(a1, FieldMemOperand(v0, JSArray::kLengthOffset));
+
+ // Fill out the elements FixedArray.
+ // v0: JSArray, tagged.
+ // a3: FixedArray, tagged.
+ // t1: Number of elements in array, untagged.
+
+ // Set map.
+ __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
+ // Set FixedArray length.
+ __ sll(t2, t1, kSmiTagSize);
+ __ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset));
+ // Fill contents of fixed-array with the-hole.
+ __ li(a2, Operand(masm->isolate()->factory()->the_hole_value()));
+ __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ // Fill fixed array elements with hole.
+ // v0: JSArray, tagged.
+ // a2: the hole.
+ // a3: Start of elements in FixedArray.
+ // t1: Number of elements to fill.
+ Label loop;
+ __ sll(t1, t1, kPointerSizeLog2); // Convert num elements to num bytes.
+ __ addu(t1, t1, a3); // Point past last element to store.
+ __ bind(&loop);
+ __ Branch(&done, ge, a3, Operand(t1)); // Break when a3 past end of elem.
+ __ sw(a2, MemOperand(a3));
+ __ Branch(&loop, USE_DELAY_SLOT);
+ __ addiu(a3, a3, kPointerSize); // In branch delay slot.
+
+ __ bind(&done);
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ __ bind(&slowcase);
+ __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
}
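
The hole-filling loop at the end is a plain pointer fill; in C++ terms
(a sketch, with a3 as the moving pointer and t1 as the limit):

    // Store 'the hole' into every element slot, one word at a time.
    void FillWithHole(void** elements, int count, void* the_hole_value) {
      for (void** p = elements; p < elements + count; ++p) {
        *p = the_hole_value;
      }
    }
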
void CallFunctionStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Label slow;
+
+ // If the receiver might be a value (string, number or boolean) check
+ // for this and box it if it is.
+ if (ReceiverMightBeValue()) {
+ // Get the receiver from the stack.
+ // function, receiver [, arguments]
+ Label receiver_is_value, receiver_is_js_object;
+ __ lw(a1, MemOperand(sp, argc_ * kPointerSize));
+
+ // Check if receiver is a smi (which is a number value).
+ __ JumpIfSmi(a1, &receiver_is_value);
+
+ // Check if the receiver is a valid JS object.
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&receiver_is_js_object,
+ ge,
+ a2,
+ Operand(FIRST_JS_OBJECT_TYPE));
+
+ // Call the runtime to box the value.
+ __ bind(&receiver_is_value);
+ // We need natives to execute this.
+ __ EnterInternalFrame();
+ __ push(a1);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ LeaveInternalFrame();
+ __ sw(v0, MemOperand(sp, argc_ * kPointerSize));
+
+ __ bind(&receiver_is_js_object);
+ }
+
+ // Get the function to call from the stack.
+ // function, receiver [, arguments]
+ __ lw(a1, MemOperand(sp, (argc_ + 1) * kPointerSize));
+
+ // Check that the function is really a JavaScript function.
+ // a1: pushed function (to be verified)
+ __ JumpIfSmi(a1, &slow);
+ // Get the map of the function object.
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+ // Fast-case: Invoke the function now.
+ // a1: pushed function
+ ParameterCount actual(argc_);
+ __ InvokeFunction(a1, actual, JUMP_FUNCTION);
+
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
+ __ li(a0, Operand(argc_)); // Set up the number of arguments.
+ __ mov(a2, zero_reg);
+ __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
}
// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
const char* CompareStub::GetName() {
- UNIMPLEMENTED_MIPS();
+ ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
+ (lhs_.is(a1) && rhs_.is(a0)));
+
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+
+ const char* cc_name;
+ switch (cc_) {
+ case lt: cc_name = "LT"; break;
+ case gt: cc_name = "GT"; break;
+ case le: cc_name = "LE"; break;
+ case ge: cc_name = "GE"; break;
+ case eq: cc_name = "EQ"; break;
+ case ne: cc_name = "NE"; break;
+ default: cc_name = "UnknownCondition"; break;
+ }
+
+ const char* lhs_name = lhs_.is(a0) ? "_a0" : "_a1";
+ const char* rhs_name = rhs_.is(a0) ? "_a0" : "_a1";
+
+ const char* strict_name = "";
+ if (strict_ && (cc_ == eq || cc_ == ne)) {
+ strict_name = "_STRICT";
+ }
+
+ const char* never_nan_nan_name = "";
+ if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
+ never_nan_nan_name = "_NO_NAN";
+ }
+
+ const char* include_number_compare_name = "";
+ if (!include_number_compare_) {
+ include_number_compare_name = "_NO_NUMBER";
+ }
+
+ const char* include_smi_compare_name = "";
+ if (!include_smi_compare_) {
+ include_smi_compare_name = "_NO_SMI";
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "CompareStub_%s%s%s%s%s%s",
+ cc_name,
+ lhs_name,
+ rhs_name,
+ strict_name,
+ never_nan_nan_name,
+ include_number_compare_name,
+ include_smi_compare_name);
return name_;
}
int CompareStub::MinorKey() {
- UNIMPLEMENTED_MIPS();
- return 0;
+ // Encode the two parameters in a unique 16 bit value.
+ ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
+ ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
+ (lhs_.is(a1) && rhs_.is(a0)));
+ return ConditionField::encode(static_cast<unsigned>(cc_))
+ | RegisterField::encode(lhs_.is(a0))
+ | StrictField::encode(strict_)
+ | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
+ | IncludeSmiCompareField::encode(include_smi_compare_);
}
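
With plain shifts in place of V8's BitField helpers, the packing above looks
like the sketch below; the field positions are illustrative only, not the
real BitField layout:

    unsigned EncodeMinorKey(unsigned condition, bool lhs_is_a0, bool strict,
                            bool never_nan_nan, bool include_smi_compare) {
      return (condition << 4) |                         // ConditionField.
             (static_cast<unsigned>(lhs_is_a0) << 3) |  // RegisterField.
             (static_cast<unsigned>(strict) << 2) |     // StrictField.
             (static_cast<unsigned>(never_nan_nan) << 1) |
             static_cast<unsigned>(include_smi_compare);
    }
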
// StringCharCodeAtGenerator.
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Label flat_string;
+ Label ascii_string;
+ Label got_char_code;
+
+ ASSERT(!t0.is(scratch_));
+ ASSERT(!t0.is(index_));
+ ASSERT(!t0.is(result_));
+ ASSERT(!t0.is(object_));
+
+ // If the receiver is a smi trigger the non-string case.
+ __ JumpIfSmi(object_, receiver_not_string_);
+
+ // Fetch the instance type of the receiver into result register.
+ __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ // If the receiver is not a string trigger the non-string case.
+ __ And(t0, result_, Operand(kIsNotStringMask));
+ __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
+
+ // If the index is non-smi trigger the non-smi case.
+ __ JumpIfNotSmi(index_, &index_not_smi_);
+
+ // Put smi-tagged index into scratch register.
+ __ mov(scratch_, index_);
+ __ bind(&got_smi_index_);
+
+ // Check for index out of range.
+ __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
+ __ Branch(index_out_of_range_, ls, t0, Operand(scratch_));
+
+ // We need special handling for non-flat strings.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ And(t0, result_, Operand(kStringRepresentationMask));
+ __ Branch(&flat_string, eq, t0, Operand(zero_reg));
+
+ // Handle non-flat strings.
+ __ And(t0, result_, Operand(kIsConsStringMask));
+ __ Branch(&call_runtime_, eq, t0, Operand(zero_reg));
+
+ // ConsString.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ lw(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
+ __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
+ __ Branch(&call_runtime_, ne, result_, Operand(t0));
+
+ // Get the first of the two strings and load its instance type.
+ __ lw(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
+ __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ // If the first cons component is also non-flat, then go to runtime.
+ STATIC_ASSERT(kSeqStringTag == 0);
+
+ __ And(t0, result_, Operand(kStringRepresentationMask));
+ __ Branch(&call_runtime_, ne, t0, Operand(zero_reg));
+
+ // Check for 1-byte or 2-byte string.
+ __ bind(&flat_string);
+ STATIC_ASSERT(kAsciiStringTag != 0);
+ __ And(t0, result_, Operand(kStringEncodingMask));
+ __ Branch(&ascii_string, ne, t0, Operand(zero_reg));
+
+ // 2-byte string.
+ // Load the 2-byte character code into the result register. We can
+ // add without shifting since the smi tag size is the log2 of the
+ // number of bytes in a two-byte character.
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
+ __ Addu(scratch_, object_, Operand(scratch_));
+ __ lhu(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
+ __ Branch(&got_char_code);
+
+ // ASCII string.
+ // Load the byte into the result register.
+ __ bind(&ascii_string);
+
+ __ srl(t0, scratch_, kSmiTagSize);
+ __ Addu(scratch_, object_, t0);
+
+ __ lbu(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
+
+ __ bind(&got_char_code);
+ __ sll(result_, result_, kSmiTagSize);
+ __ bind(&exit_);
}
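
The index arithmetic on the two flat-string paths boils down to the sketch
below: the smi tag doubles as the *2 scale for two-byte characters and is
shifted away for ASCII ('chars' points at the character data, a hypothetical
simplification of the header-relative loads above):

    #include <cstdint>

    // smi_index is the tagged index, i.e. value << 1 (kSmiTagSize == 1).
    uint16_t CharCodeAtFlat(const uint8_t* chars, bool is_ascii,
                            int32_t smi_index) {
      if (is_ascii) {
        return chars[smi_index >> 1];   // srl t0, scratch_, kSmiTagSize.
      }
      const uint16_t* two_byte = reinterpret_cast<const uint16_t*>(chars);
      return two_byte[smi_index >> 1];  // Addu + lhu: the smi is the offset.
    }
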
void StringCharCodeAtGenerator::GenerateSlow(
MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- UNIMPLEMENTED_MIPS();
+ __ Abort("Unexpected fallthrough to CharCodeAt slow case");
+
+ // Index is not a smi.
+ __ bind(&index_not_smi_);
+ // If index is a heap number, try converting it to an integer.
+ __ CheckMap(index_,
+ scratch_,
+ Heap::kHeapNumberMapRootIndex,
+ index_not_number_,
+ true);
+ call_helper.BeforeCall(masm);
+ // Consumed by runtime conversion function:
+ __ Push(object_, index_, index_);
+ if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ } else {
+ ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ // NumberToSmi discards numbers that are not exact integers.
+ __ CallRuntime(Runtime::kNumberToSmi, 1);
+ }
+
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+
+ __ Move(scratch_, v0);
+
+ __ pop(index_);
+ __ pop(object_);
+ // Reload the instance type.
+ __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ call_helper.AfterCall(masm);
+ // If index is still not a smi, it must be out of range.
+ __ JumpIfNotSmi(scratch_, index_out_of_range_);
+ // Otherwise, return to the fast path.
+ __ Branch(&got_smi_index_);
+
+ // Call runtime. We get here when the receiver is a string and the
+ // index is a number, but the code of getting the actual character
+ // is too complex (e.g., when the string needs to be flattened).
+ __ bind(&call_runtime_);
+ call_helper.BeforeCall(masm);
+ __ Push(object_, index_);
+ __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+
+ __ Move(result_, v0);
+
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharCodeAt slow case");
}
@@ -593,13 +4895,46 @@
// StringCharFromCodeGenerator
void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // Fast case of Heap::LookupSingleCharacterStringFromCode.
+
+ ASSERT(!t0.is(result_));
+ ASSERT(!t0.is(code_));
+
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiShiftSize == 0);
+ ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
+ __ And(t0,
+ code_,
+ Operand(kSmiTagMask |
+ ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+ __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
+
+ __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
+ // At this point code register contains smi tagged ASCII char code.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(result_, result_, t0);
+ __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ Branch(&slow_case_, eq, result_, Operand(t0));
+ __ bind(&exit_);
}
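
In outline the fast path is a single cache lookup, as sketched below with
simplified stand-in types (the real cache is the single-character-string
root array):

    #include <cstddef>

    struct Object {};
    static Object the_undefined_value;      // Stand-in for the undefined root.
    static Object* single_char_cache[128];  // Stand-in for the cache root.

    // Returns NULL to signal the slow case: either the code is out of the
    // ASCII range, or the cache entry is still undefined.
    Object* CharFromCodeFast(int code) {
      const int kMaxAsciiCharCode = 127;
      if (code < 0 || code > kMaxAsciiCharCode) return NULL;
      Object* result = single_char_cache[code];
      return (result == &the_undefined_value) ? NULL : result;
    }
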
void StringCharFromCodeGenerator::GenerateSlow(
MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- UNIMPLEMENTED_MIPS();
+ __ Abort("Unexpected fallthrough to CharFromCode slow case");
+
+ __ bind(&slow_case_);
+ call_helper.BeforeCall(masm);
+ __ push(code_);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ Move(result_, v0);
+
+ call_helper.AfterCall(masm);
+ __ Branch(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharFromCode slow case");
}
@@ -607,13 +4942,15 @@
// StringCharAtGenerator
void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ char_code_at_generator_.GenerateFast(masm);
+ char_from_code_generator_.GenerateFast(masm);
}
void StringCharAtGenerator::GenerateSlow(
MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
- UNIMPLEMENTED_MIPS();
+ char_code_at_generator_.GenerateSlow(masm, call_helper);
+ char_from_code_generator_.GenerateSlow(masm, call_helper);
}
@@ -687,7 +5024,24 @@
Register count,
Register scratch,
bool ascii) {
- UNIMPLEMENTED_MIPS();
+ Label loop;
+ Label done;
+ // This loop just copies one character at a time, as it is only used for
+ // very short strings.
+ if (!ascii) {
+ __ addu(count, count, count);
+ }
+ __ Branch(&done, eq, count, Operand(zero_reg));
+ __ addu(count, dest, count); // Count now points to the last dest byte.
+
+ __ bind(&loop);
+ __ lbu(scratch, MemOperand(src));
+ __ addiu(src, src, 1);
+ __ sb(scratch, MemOperand(dest));
+ __ addiu(dest, dest, 1);
+ __ Branch(&loop, lt, dest, Operand(count));
+
+ __ bind(&done);
}
@@ -707,7 +5061,105 @@
Register scratch4,
Register scratch5,
int flags) {
- UNIMPLEMENTED_MIPS();
+ bool ascii = (flags & COPY_ASCII) != 0;
+ bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
+
+ if (dest_always_aligned && FLAG_debug_code) {
+ // Check that destination is actually word aligned if the flag says
+ // that it is.
+ __ And(scratch4, dest, Operand(kPointerAlignmentMask));
+ __ Check(eq,
+ "Destination of copy not aligned.",
+ scratch4,
+ Operand(zero_reg));
+ }
+
+ const int kReadAlignment = 4;
+ const int kReadAlignmentMask = kReadAlignment - 1;
+ // Ensure that reading an entire aligned word containing the last character
+ // of a string will not read outside the allocated area (because we pad up
+ // to kObjectAlignment).
+ STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
+ // Assumes word reads and writes are little endian.
+ // Nothing to do for zero characters.
+ Label done;
+
+ if (!ascii) {
+ __ addu(count, count, count);
+ }
+ __ Branch(&done, eq, count, Operand(zero_reg));
+
+ Label byte_loop;
+ // Must copy at least eight bytes, otherwise just do it one byte at a time.
+ __ Subu(scratch1, count, Operand(8));
+ __ Addu(count, dest, Operand(count));
+ Register limit = count; // Read until src equals this.
+ __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg));
+
+ if (!dest_always_aligned) {
+ // Align dest by byte copying. Copies between zero and three bytes.
+ __ And(scratch4, dest, Operand(kReadAlignmentMask));
+ Label dest_aligned;
+ __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg));
+ Label aligned_loop;
+ __ bind(&aligned_loop);
+ __ lbu(scratch1, MemOperand(src));
+ __ addiu(src, src, 1);
+ __ sb(scratch1, MemOperand(dest));
+ __ addiu(dest, dest, 1);
+ __ addiu(scratch4, scratch4, 1);
+ __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask));
+ __ bind(&dest_aligned);
+ }
+
+ Label simple_loop;
+
+ __ And(scratch4, src, Operand(kReadAlignmentMask));
+ __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg));
+
+ // Loop for src/dst that are not aligned the same way.
+ // This loop uses lwl and lwr instructions. These instructions
+ // depend on the endianness, and the implementation assumes little-endian.
+ {
+ Label loop;
+ __ bind(&loop);
+ __ lwr(scratch1, MemOperand(src));
+ __ Addu(src, src, Operand(kReadAlignment));
+ __ lwl(scratch1, MemOperand(src, -1));
+ __ sw(scratch1, MemOperand(dest));
+ __ Addu(dest, dest, Operand(kReadAlignment));
+ __ Subu(scratch2, limit, dest);
+ __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
+ }
+
+ __ Branch(&byte_loop);
+
+ // Simple loop.
+ // Copy words from src to dest, until less than four bytes left.
+ // Both src and dest are word aligned.
+ __ bind(&simple_loop);
+ {
+ Label loop;
+ __ bind(&loop);
+ __ lw(scratch1, MemOperand(src));
+ __ Addu(src, src, Operand(kReadAlignment));
+ __ sw(scratch1, MemOperand(dest));
+ __ Addu(dest, dest, Operand(kReadAlignment));
+ __ Subu(scratch2, limit, dest);
+ __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
+ }
+
+ // Copy bytes from src to dest until dest hits limit.
+ __ bind(&byte_loop);
+ // Test if dest has already reached the limit.
+ __ Branch(&done, ge, dest, Operand(limit));
+ __ lbu(scratch1, MemOperand(src));
+ __ addiu(src, src, 1);
+ __ sb(scratch1, MemOperand(dest));
+ __ addiu(dest, dest, 1);
+ __ Branch(&byte_loop);
+
+ __ bind(&done);
}
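
The overall copy strategy above is: fewer than eight bytes go straight to the
byte loop; otherwise align the destination byte-by-byte, move whole words (lw
for an equally aligned source, the lwl/lwr pair for a misaligned one), and
finish with trailing bytes. A C++ sketch, with memcpy standing in for both
word paths:

    #include <cstdint>
    #include <cstring>

    void CopyChars(uint8_t* dest, const uint8_t* src, size_t count) {
      const size_t kReadAlignment = 4;
      if (count < 8) {                            // Straight to byte_loop.
        while (count--) *dest++ = *src++;
        return;
      }
      while (reinterpret_cast<uintptr_t>(dest) & (kReadAlignment - 1)) {
        *dest++ = *src++;                         // Align dest (aligned_loop).
        --count;
      }
      while (count >= kReadAlignment) {           // Word loop.
        std::memcpy(dest, src, kReadAlignment);   // Tolerates unaligned src.
        dest += kReadAlignment;
        src += kReadAlignment;
        count -= kReadAlignment;
      }
      while (count--) *dest++ = *src++;           // Trailing bytes (byte_loop).
    }
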
@@ -720,32 +5172,434 @@
Register scratch4,
Register scratch5,
Label* not_found) {
- UNIMPLEMENTED_MIPS();
+ // Register scratch3 is the general scratch register in this function.
+ Register scratch = scratch3;
+
+ // Make sure that both characters are not digits, as such strings have a
+ // different hash algorithm. Don't try to look for these in the symbol table.
+ Label not_array_index;
+ __ Subu(scratch, c1, Operand(static_cast<int>('0')));
+ __ Branch(&not_array_index,
+ Ugreater,
+ scratch,
+ Operand(static_cast<int>('9' - '0')));
+ __ Subu(scratch, c2, Operand(static_cast<int>('0')));
+
+ // If the check failed, combine both characters into a single halfword.
+ // This is required by the contract of the method: code at the
+ // not_found branch expects this combination in the c1 register.
+ Label tmp;
+ __ sll(scratch1, c2, kBitsPerByte);
+ __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
+ __ Or(c1, c1, scratch1);
+ __ bind(&tmp);
+ __ Branch(not_found,
+ Uless_equal,
+ scratch,
+ Operand(static_cast<int>('9' - '0')));
+
+ __ bind(&not_array_index);
+ // Calculate the two character string hash.
+ Register hash = scratch1;
+ StringHelper::GenerateHashInit(masm, hash, c1);
+ StringHelper::GenerateHashAddCharacter(masm, hash, c2);
+ StringHelper::GenerateHashGetHash(masm, hash);
+
+ // Collect the two characters in a register.
+ Register chars = c1;
+ __ sll(scratch, c2, kBitsPerByte);
+ __ Or(chars, chars, scratch);
+
+ // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+ // hash: hash of two character string.
+
+ // Load symbol table.
+ // Load address of first element of the symbol table.
+ Register symbol_table = c2;
+ __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
+
+ Register undefined = scratch4;
+ __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+ // Calculate capacity mask from the symbol table capacity.
+ Register mask = scratch2;
+ __ lw(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
+ __ sra(mask, mask, 1);
+ __ Addu(mask, mask, -1);
+
+ // Calculate untagged address of the first element of the symbol table.
+ Register first_symbol_table_element = symbol_table;
+ __ Addu(first_symbol_table_element, symbol_table,
+ Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
+
+ // Registers.
+ // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+ // hash: hash of two character string
+ // mask: capacity mask
+ // first_symbol_table_element: address of the first element of
+ // the symbol table
+ // undefined: the undefined object
+ // scratch: -
+
+ // Perform a number of probes in the symbol table.
+ static const int kProbes = 4;
+ Label found_in_symbol_table;
+ Label next_probe[kProbes];
+ Register candidate = scratch5; // Scratch register contains candidate.
+ for (int i = 0; i < kProbes; i++) {
+ // Calculate entry in symbol table.
+ if (i > 0) {
+ __ Addu(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
+ } else {
+ __ mov(candidate, hash);
+ }
+
+ __ And(candidate, candidate, Operand(mask));
+
+ // Load the entry from the symbol table.
+ STATIC_ASSERT(SymbolTable::kEntrySize == 1);
+ __ sll(scratch, candidate, kPointerSizeLog2);
+ __ Addu(scratch, scratch, first_symbol_table_element);
+ __ lw(candidate, MemOperand(scratch));
+
+ // If entry is undefined no string with this hash can be found.
+ Label is_string;
+ __ GetObjectType(candidate, scratch, scratch);
+ __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
+
+ __ Branch(not_found, eq, undefined, Operand(candidate));
+ // Must be null (deleted entry).
+ if (FLAG_debug_code) {
+ __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+ __ Assert(eq, "oddball in symbol table is not undefined or null",
+ scratch, Operand(candidate));
+ }
+ __ jmp(&next_probe[i]);
+
+ __ bind(&is_string);
+
+ // Check that the candidate is a non-external ASCII string. The instance
+ // type is still in the scratch register from the CompareObjectType
+ // operation.
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
+
+ // If length is not 2 the string is not a candidate.
+ __ lw(scratch, FieldMemOperand(candidate, String::kLengthOffset));
+ __ Branch(&next_probe[i], ne, scratch, Operand(Smi::FromInt(2)));
+
+ // Check if the two characters match.
+ // Assumes that word load is little endian.
+ __ lhu(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
+ __ Branch(&found_in_symbol_table, eq, chars, Operand(scratch));
+ __ bind(&next_probe[i]);
+ }
+
+ // No matching 2 character string found by probing.
+ __ jmp(not_found);
+
+ // Scratch register contains result when we fall through to here.
+ Register result = candidate;
+ __ bind(&found_in_symbol_table);
+ __ mov(v0, result);
}
void StringHelper::GenerateHashInit(MacroAssembler* masm,
Register hash,
Register character) {
- UNIMPLEMENTED_MIPS();
+ // hash = character + (character << 10);
+ __ sll(hash, character, 10);
+ __ addu(hash, hash, character);
+ // hash ^= hash >> 6;
+ __ sra(at, hash, 6);
+ __ xor_(hash, hash, at);
}
void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
Register hash,
Register character) {
- UNIMPLEMENTED_MIPS();
+ // hash += character;
+ __ addu(hash, hash, character);
+ // hash += hash << 10;
+ __ sll(at, hash, 10);
+ __ addu(hash, hash, at);
+ // hash ^= hash >> 6;
+ __ sra(at, hash, 6);
+ __ xor_(hash, hash, at);
}
void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
Register hash) {
- UNIMPLEMENTED_MIPS();
+ // hash += hash << 3;
+ __ sll(at, hash, 3);
+ __ addu(hash, hash, at);
+ // hash ^= hash >> 11;
+ __ sra(at, hash, 11);
+ __ xor_(hash, hash, at);
+ // hash += hash << 15;
+ __ sll(at, hash, 15);
+ __ addu(hash, hash, at);
+
+ // if (hash == 0) hash = 27;
+ __ ori(at, zero_reg, 27);
+ __ movz(hash, at, hash);
}
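
Taken together, the three helpers compute the running string hash documented
by their inline comments. The same computation in straight C++ (note the
assembly uses arithmetic shifts, sra; on unsigned 32-bit values as below the
shifts are logical, which agrees whenever the top bit is clear):

    #include <cstdint>

    uint32_t HashInit(uint32_t character) {
      uint32_t hash = character + (character << 10);
      hash ^= hash >> 6;
      return hash;
    }

    uint32_t HashAddCharacter(uint32_t hash, uint32_t character) {
      hash += character;
      hash += hash << 10;
      hash ^= hash >> 6;
      return hash;
    }

    uint32_t HashGetHash(uint32_t hash) {
      hash += hash << 3;
      hash ^= hash >> 11;
      hash += hash << 15;
      return hash == 0 ? 27 : hash;  // The movz: a zero hash becomes 27.
    }
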
void SubStringStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Label sub_string_runtime;
+ // Stack frame on entry.
+ // ra: return address
+ // sp[0]: to
+ // sp[4]: from
+ // sp[8]: string
+
+ // This stub is called from the native-call %_SubString(...), so
+ // nothing can be assumed about the arguments. It is tested that:
+ // "string" is a sequential string,
+ // both "from" and "to" are smis, and
+ // 0 <= from <= to <= string.length.
+ // If any of these assumptions fail, we call the runtime system.
+
+ static const int kToOffset = 0 * kPointerSize;
+ static const int kFromOffset = 1 * kPointerSize;
+ static const int kStringOffset = 2 * kPointerSize;
+
+ Register to = t2;
+ Register from = t3;
+
+ // Check bounds and smi-ness.
+ __ lw(to, MemOperand(sp, kToOffset));
+ __ lw(from, MemOperand(sp, kFromOffset));
+ STATIC_ASSERT(kFromOffset == kToOffset + 4);
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+
+ __ JumpIfNotSmi(from, &sub_string_runtime);
+ __ JumpIfNotSmi(to, &sub_string_runtime);
+
+ __ sra(a3, from, kSmiTagSize); // Remove smi tag.
+ __ sra(t5, to, kSmiTagSize); // Remove smi tag.
+
+ // a3: from index (untagged smi)
+ // t5: to index (untagged smi)
+
+ __ Branch(&sub_string_runtime, lt, a3, Operand(zero_reg)); // From < 0.
+
+ __ subu(a2, t5, a3);
+ __ Branch(&sub_string_runtime, gt, a3, Operand(t5)); // Fail if from > to.
+
+ // Special handling of sub-strings of length 1 and 2. One character strings
+ // are handled in the runtime system (looked up in the single character
+ // cache). Two character strings are looked up in the symbol table first.
+ __ Branch(&sub_string_runtime, lt, a2, Operand(2));
+
+ // Both to and from are smis.
+
+ // a2: result string length
+ // a3: from index (untagged smi)
+ // t2: (a.k.a. to): to (smi)
+ // t3: (a.k.a. from): from offset (smi)
+ // t5: to index (untagged smi)
+
+ // Make sure first argument is a sequential (or flat) string.
+ __ lw(t1, MemOperand(sp, kStringOffset));
+ __ Branch(&sub_string_runtime, eq, t1, Operand(kSmiTagMask));
+
+ __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
+ __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
+ __ And(t4, a1, Operand(kIsNotStringMask));
+
+ __ Branch(&sub_string_runtime, ne, t4, Operand(zero_reg));
+
+ // a1: instance type
+ // a2: result string length
+ // a3: from index (untagged smi)
+ // t1: string
+ // t2: (a.k.a. to): to (smi)
+ // t3: (a.k.a. from): from offset (smi)
+ // t5: to index (untagged smi)
+
+ Label seq_string;
+ __ And(t0, a1, Operand(kStringRepresentationMask));
+ STATIC_ASSERT(kSeqStringTag < kConsStringTag);
+ STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+
+ // External strings go to runtime.
+ __ Branch(&sub_string_runtime, gt, t0, Operand(kConsStringTag));
+
+ // Sequential strings are handled directly.
+ __ Branch(&seq_string, lt, t0, Operand(kConsStringTag));
+
+ // Cons string. Try to recurse (once) on the first substring.
+ // (This adds a little more generality than necessary to handle flattened
+ // cons strings, but not much).
+ __ lw(t1, FieldMemOperand(t1, ConsString::kFirstOffset));
+ __ lw(t0, FieldMemOperand(t1, HeapObject::kMapOffset));
+ __ lbu(a1, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kSeqStringTag == 0);
+ // Cons and External strings go to runtime.
+ __ Branch(&sub_string_runtime, ne, a1, Operand(kStringRepresentationMask));
+
+ // Definitely a sequential string.
+ __ bind(&seq_string);
+
+ // a1: instance type
+ // a2: result string length
+ // a3: from index (untagged smi)
+ // t1: string
+ // t2: (a.k.a. to): to (smi)
+ // t3: (a.k.a. from): from offset (smi)
+ // t5: to index (untagged smi)
+
+ __ lw(t0, FieldMemOperand(t1, String::kLengthOffset));
+ __ Branch(&sub_string_runtime, lt, t0, Operand(to)); // Fail if to > length.
+ to = no_reg;
+
+ // a1: instance type
+ // a2: result string length
+ // a3: from index (untagged smi)
+ // t1: string
+ // t3: (a.k.a. from): from offset (smi)
+ // t5: to index (untagged smi)
+
+ // Check for flat ASCII string.
+ Label non_ascii_flat;
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+
+ __ And(t4, a1, Operand(kStringEncodingMask));
+ __ Branch(&non_ascii_flat, eq, t4, Operand(zero_reg));
+
+ Label result_longer_than_two;
+ __ Branch(&result_longer_than_two, gt, a2, Operand(2));
+
+ // Sub string of length 2 requested.
+ // Get the two characters forming the sub string.
+ __ Addu(t1, t1, Operand(a3));
+ __ lbu(a3, FieldMemOperand(t1, SeqAsciiString::kHeaderSize));
+ __ lbu(t0, FieldMemOperand(t1, SeqAsciiString::kHeaderSize + 1));
+
+ // Try to lookup two character string in symbol table.
+ Label make_two_character_string;
+ StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ masm, a3, t0, a1, t1, t2, t3, t4, &make_two_character_string);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ // a2: result string length.
+ // a3: two characters combined into halfword in little endian byte order.
+ __ bind(&make_two_character_string);
+ __ AllocateAsciiString(v0, a2, t0, t1, t4, &sub_string_runtime);
+ __ sh(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
+ __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ __ bind(&result_longer_than_two);
+
+ // Allocate the result.
+ __ AllocateAsciiString(v0, a2, t4, t0, a1, &sub_string_runtime);
+
+ // v0: result string.
+ // a2: result string length.
+ // a3: from index (untagged smi)
+ // t1: string.
+ // t3: (a.k.a. from): from offset (smi)
+ // Locate first character of result.
+ __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // Locate 'from' character of string.
+ __ Addu(t1, t1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ Addu(t1, t1, Operand(a3));
+
+ // v0: result string.
+ // a1: first character of result string.
+ // a2: result string length.
+ // t1: first character of sub string to copy.
+ STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ StringHelper::GenerateCopyCharactersLong(
+ masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
+ __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ __ bind(&non_ascii_flat);
+ // a2: result string length.
+ // t1: string.
+ // t3: (a.k.a. from): from offset (smi)
+ // Check for flat two byte string.
+
+ // Allocate the result.
+ __ AllocateTwoByteString(v0, a2, a1, a3, t0, &sub_string_runtime);
+
+ // v0: result string.
+ // a2: result string length.
+ // t1: string.
+ // Locate first character of result.
+ __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Locate 'from' character of string.
+ __ Addu(t1, t1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // As "from" is a smi it is 2 times the value which matches the size of a two
+ // byte character.
+ __ Addu(t1, t1, Operand(from));
+ from = no_reg;
+
+ // v0: result string.
+ // a1: first character of result.
+ // a2: result length.
+ // t1: first character of string to copy.
+ STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ StringHelper::GenerateCopyCharactersLong(
+ masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
+ __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ // Just jump to runtime to create the sub string.
+ __ bind(&sub_string_runtime);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
+}
+
+
+void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ Register length = scratch1;
+
+ // Compare lengths.
+ Label strings_not_equal, check_zero_length;
+ __ lw(length, FieldMemOperand(left, String::kLengthOffset));
+ __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
+ __ Branch(&check_zero_length, eq, length, Operand(scratch2));
+ __ bind(&strings_not_equal);
+ __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
+ __ Ret();
+
+ // Check if the length is zero.
+ Label compare_chars;
+ __ bind(&check_zero_length);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Branch(&compare_chars, ne, length, Operand(zero_reg));
+ __ li(v0, Operand(Smi::FromInt(EQUAL)));
+ __ Ret();
+
+ // Compare characters.
+ __ bind(&compare_chars);
+
+ GenerateAsciiCharsCompareLoop(masm,
+ left, right, length, scratch2, scratch3, v0,
+ &strings_not_equal);
+
+ // Characters are equal.
+ __ li(v0, Operand(Smi::FromInt(EQUAL)));
+ __ Ret();
}
@@ -756,62 +5610,1001 @@
Register scratch2,
Register scratch3,
Register scratch4) {
- UNIMPLEMENTED_MIPS();
+ Label result_not_equal, compare_lengths;
+ // Find minimum length and length difference.
+ __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
+ __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
+ __ Subu(scratch3, scratch1, Operand(scratch2));
+ Register length_delta = scratch3;
+ __ slt(scratch4, scratch2, scratch1);
+ __ movn(scratch1, scratch2, scratch4);
+ Register min_length = scratch1;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
+
+ // Compare loop.
+ GenerateAsciiCharsCompareLoop(masm,
+ left, right, min_length, scratch2, scratch4, v0,
+ &result_not_equal);
+
+ // Compare lengths - strings up to min-length are equal.
+ __ bind(&compare_lengths);
+ ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+ // Use length_delta as result if it's zero.
+ __ mov(scratch2, length_delta);
+ __ mov(scratch4, zero_reg);
+ __ mov(v0, zero_reg);
+
+ __ bind(&result_not_equal);
+  // Conditionally update the result based either on length_delta or
+  // the last comparison performed in the loop above.
+ Label ret;
+ __ Branch(&ret, eq, scratch2, Operand(scratch4));
+ __ li(v0, Operand(Smi::FromInt(GREATER)));
+ __ Branch(&ret, gt, scratch2, Operand(scratch4));
+ __ li(v0, Operand(Smi::FromInt(LESS)));
+ __ bind(&ret);
+ __ Ret();
+}
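
GenerateCompareFlatAsciiStrings implements a conventional three-way compare: walk the first min(length_left, length_right) characters, and if they all match, let the length difference decide. A hedged C++ sketch of the same result convention (-1/0/1 standing in for the LESS/EQUAL/GREATER smis; the function name is illustrative):

    #include <algorithm>
    #include <string>

    // Returns -1 (LESS), 0 (EQUAL) or 1 (GREATER), like the stub's smi result.
    int FlatCompareSketch(const std::string& left, const std::string& right) {
      size_t min_length = std::min(left.size(), right.size());
      for (size_t i = 0; i < min_length; ++i) {  // The character compare loop.
        if (left[i] != right[i]) return left[i] < right[i] ? -1 : 1;
      }
      // Equal up to min_length: the length delta decides the result.
      if (left.size() == right.size()) return 0;
      return left.size() < right.size() ? -1 : 1;
    }
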
+
+
+void StringCompareStub::GenerateAsciiCharsCompareLoop(
+ MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* chars_not_equal) {
+ // Change index to run from -length to -1 by adding length to string
+ // start. This means that loop ends when index reaches zero, which
+ // doesn't need an additional compare.
+ __ SmiUntag(length);
+ __ Addu(scratch1, length,
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ Addu(left, left, Operand(scratch1));
+ __ Addu(right, right, Operand(scratch1));
+ __ Subu(length, zero_reg, length);
+ Register index = length; // index = -length;
+
+ // Compare loop.
+ Label loop;
+ __ bind(&loop);
+ __ Addu(scratch3, left, index);
+ __ lbu(scratch1, MemOperand(scratch3));
+ __ Addu(scratch3, right, index);
+ __ lbu(scratch2, MemOperand(scratch3));
+ __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
+ __ Addu(index, index, 1);
+ __ Branch(&loop, ne, index, Operand(zero_reg));
}
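
The loop above uses a negative-index idiom: both string pointers are biased past the characters being compared and the index counts up from -length, so reaching zero ends the loop without a separate bounds compare. A standalone sketch of the same idiom:

    #include <cstddef>

    // True if the first 'length' bytes of 'left' and 'right' are equal.
    bool CharsEqualSketch(const char* left, const char* right,
                          std::ptrdiff_t length) {
      // Bias the pointers by 'length' so indexing starts at -length.
      left += length;
      right += length;
      for (std::ptrdiff_t index = -length; index != 0; ++index) {
        if (left[index] != right[index]) return false;
      }
      return true;  // Index reached zero without finding a mismatch.
    }
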
void StringCompareStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Label runtime;
+
+ Counters* counters = masm->isolate()->counters();
+
+ // Stack frame on entry.
+ // sp[0]: right string
+ // sp[4]: left string
+ __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
+ __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
+
+ Label not_same;
+ __ Branch(¬_same, ne, a0, Operand(a1));
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ li(v0, Operand(Smi::FromInt(EQUAL)));
+ __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(¬_same);
+
+ // Check that both objects are sequential ASCII strings.
+ __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
+
+ // Compare flat ASCII strings natively. Remove arguments from stack first.
+ __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
+
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
void StringAddStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Label string_add_runtime, call_builtin;
+ Builtins::JavaScript builtin_id = Builtins::ADD;
+
+ Counters* counters = masm->isolate()->counters();
+
+ // Stack on entry:
+ // sp[0]: second argument (right).
+ // sp[4]: first argument (left).
+
+ // Load the two arguments.
+ __ lw(a0, MemOperand(sp, 1 * kPointerSize)); // First argument.
+ __ lw(a1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
+
+ // Make sure that both arguments are strings if not known in advance.
+ if (flags_ == NO_STRING_ADD_FLAGS) {
+ __ JumpIfEitherSmi(a0, a1, &string_add_runtime);
+ // Load instance types.
+ __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+ __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kStringTag == 0);
+ // If either is not a string, go to runtime.
+ __ Or(t4, t0, Operand(t1));
+ __ And(t4, t4, Operand(kIsNotStringMask));
+ __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
+ } else {
+ // Here at least one of the arguments is definitely a string.
+ // We convert the one that is not known to be a string.
+ if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
+ ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
+ GenerateConvertArgument(
+ masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
+ builtin_id = Builtins::STRING_ADD_RIGHT;
+ } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
+ ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
+ GenerateConvertArgument(
+ masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
+ builtin_id = Builtins::STRING_ADD_LEFT;
+ }
+ }
+
+ // Both arguments are strings.
+ // a0: first string
+ // a1: second string
+ // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+ // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+ {
+ Label strings_not_empty;
+    // Check if either of the strings is empty. In that case return the other.
+    // These tests use a zero-length check on the string length, which is a Smi.
+ // Assert that Smi::FromInt(0) is really 0.
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT(Smi::FromInt(0) == 0);
+ __ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
+ __ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
+ __ mov(v0, a0); // Assume we'll return first string (from a0).
+ __ movz(v0, a1, a2); // If first is empty, return second (from a1).
+ __ slt(t4, zero_reg, a2); // if (a2 > 0) t4 = 1.
+ __ slt(t5, zero_reg, a3); // if (a3 > 0) t5 = 1.
+ __ and_(t4, t4, t5); // Branch if both strings were non-empty.
+ __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));
+
+ __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&strings_not_empty);
+ }
+
+ // Untag both string-lengths.
+ __ sra(a2, a2, kSmiTagSize);
+ __ sra(a3, a3, kSmiTagSize);
+
+ // Both strings are non-empty.
+ // a0: first string
+ // a1: second string
+ // a2: length of first string
+ // a3: length of second string
+ // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+ // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+ // Look at the length of the result of adding the two strings.
+ Label string_add_flat_result, longer_than_two;
+ // Adding two lengths can't overflow.
+ STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
+ __ Addu(t2, a2, Operand(a3));
+  // Use the symbol table when adding two one-character strings, as it
+  // helps later optimizations to return a symbol here.
+ __ Branch(&longer_than_two, ne, t2, Operand(2));
+
+ // Check that both strings are non-external ASCII strings.
+ if (flags_ != NO_STRING_ADD_FLAGS) {
+ __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+ __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+ }
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
+ &string_add_runtime);
+
+  // Get the two characters forming the new string.
+ __ lbu(a2, FieldMemOperand(a0, SeqAsciiString::kHeaderSize));
+ __ lbu(a3, FieldMemOperand(a1, SeqAsciiString::kHeaderSize));
+
+ // Try to lookup two character string in symbol table. If it is not found
+ // just allocate a new one.
+ Label make_two_character_string;
+ StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ masm, a2, a3, t2, t3, t0, t1, t4, &make_two_character_string);
+ __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&make_two_character_string);
+  // The resulting string has length 2, and the first characters of the two
+  // strings are combined into a single halfword in the a2 register.
+  // This lets us fill the resulting string with a single halfword store
+  // instruction instead of two loops (which assumes that the processor is
+  // in little-endian mode).
+ __ li(t2, Operand(2));
+ __ AllocateAsciiString(v0, t2, t0, t1, t4, &string_add_runtime);
+ __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
+ __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&longer_than_two);
+ // Check if resulting string will be flat.
+ __ Branch(&string_add_flat_result, lt, t2,
+ Operand(String::kMinNonFlatLength));
+ // Handle exceptionally long strings in the runtime system.
+ STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
+ ASSERT(IsPowerOf2(String::kMaxLength + 1));
+ // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
+ __ Branch(&string_add_runtime, hs, t2, Operand(String::kMaxLength + 1));
+
+ // If result is not supposed to be flat, allocate a cons string object.
+ // If both strings are ASCII the result is an ASCII cons string.
+ if (flags_ != NO_STRING_ADD_FLAGS) {
+ __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+ __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+ }
+ Label non_ascii, allocated, ascii_data;
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ // Branch to non_ascii if either string-encoding field is zero (non-ascii).
+ __ And(t4, t0, Operand(t1));
+ __ And(t4, t4, Operand(kStringEncodingMask));
+ __ Branch(&non_ascii, eq, t4, Operand(zero_reg));
+
+ // Allocate an ASCII cons string.
+ __ bind(&ascii_data);
+ __ AllocateAsciiConsString(t3, t2, t0, t1, &string_add_runtime);
+ __ bind(&allocated);
+ // Fill the fields of the cons string.
+ __ sw(a0, FieldMemOperand(t3, ConsString::kFirstOffset));
+ __ sw(a1, FieldMemOperand(t3, ConsString::kSecondOffset));
+ __ mov(v0, t3);
+ __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&non_ascii);
+ // At least one of the strings is two-byte. Check whether it happens
+ // to contain only ASCII characters.
+ // t0: first instance type.
+ // t1: second instance type.
+  // Branch to ascii_data if _both_ instances have kAsciiDataHintMask set.
+ __ And(at, t0, Operand(kAsciiDataHintMask));
+ __ and_(at, at, t1);
+ __ Branch(&ascii_data, ne, at, Operand(zero_reg));
+
+ __ xor_(t0, t0, t1);
+ STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+ __ And(t0, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
+ __ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
+
+ // Allocate a two byte cons string.
+ __ AllocateTwoByteConsString(t3, t2, t0, t1, &string_add_runtime);
+ __ Branch(&allocated);
+
+ // Handle creating a flat result. First check that both strings are
+ // sequential and that they have the same encoding.
+ // a0: first string
+ // a1: second string
+ // a2: length of first string
+ // a3: length of second string
+ // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+ // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+ // t2: sum of lengths.
+ __ bind(&string_add_flat_result);
+ if (flags_ != NO_STRING_ADD_FLAGS) {
+ __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+ __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+ }
+ // Check that both strings are sequential, meaning that we
+ // branch to runtime if either string tag is non-zero.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ Or(t4, t0, Operand(t1));
+ __ And(t4, t4, Operand(kStringRepresentationMask));
+ __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
+
+ // Now check if both strings have the same encoding (ASCII/Two-byte).
+ // a0: first string
+ // a1: second string
+ // a2: length of first string
+ // a3: length of second string
+ // t0: first string instance type
+ // t1: second string instance type
+ // t2: sum of lengths.
+ Label non_ascii_string_add_flat_result;
+ ASSERT(IsPowerOf2(kStringEncodingMask)); // Just one bit to test.
+ __ xor_(t3, t1, t0);
+ __ And(t3, t3, Operand(kStringEncodingMask));
+ __ Branch(&string_add_runtime, ne, t3, Operand(zero_reg));
+ // And see if it's ASCII (0) or two-byte (1).
+ __ And(t3, t0, Operand(kStringEncodingMask));
+ __ Branch(&non_ascii_string_add_flat_result, eq, t3, Operand(zero_reg));
+
+ // Both strings are sequential ASCII strings. We also know that they are
+ // short (since the sum of the lengths is less than kMinNonFlatLength).
+ // t2: length of resulting flat string
+ __ AllocateAsciiString(t3, t2, t0, t1, t4, &string_add_runtime);
+ // Locate first character of result.
+ __ Addu(t2, t3, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // Locate first character of first argument.
+ __ Addu(a0, a0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // a0: first character of first string.
+ // a1: second string.
+ // a2: length of first string.
+ // a3: length of second string.
+ // t2: first character of result.
+ // t3: result string.
+ StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, true);
+
+ // Load second argument and locate first character.
+ __ Addu(a1, a1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // a1: first character of second string.
+ // a3: length of second string.
+ // t2: next character of result.
+ // t3: result string.
+ StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
+ __ mov(v0, t3);
+ __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&non_ascii_string_add_flat_result);
+ // Both strings are sequential two byte strings.
+ // a0: first string.
+ // a1: second string.
+ // a2: length of first string.
+ // a3: length of second string.
+ // t2: sum of length of strings.
+ __ AllocateTwoByteString(t3, t2, t0, t1, t4, &string_add_runtime);
+ // a0: first string.
+ // a1: second string.
+ // a2: length of first string.
+ // a3: length of second string.
+ // t3: result string.
+
+ // Locate first character of result.
+ __ Addu(t2, t3, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Locate first character of first argument.
+ __ Addu(a0, a0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+ // a0: first character of first string.
+ // a1: second string.
+ // a2: length of first string.
+ // a3: length of second string.
+ // t2: first character of result.
+ // t3: result string.
+ StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, false);
+
+ // Locate first character of second argument.
+ __ Addu(a1, a1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+ // a1: first character of second string.
+ // a3: length of second string.
+ // t2: next character of result (after copy of first string).
+ // t3: result string.
+ StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);
+
+ __ mov(v0, t3);
+ __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ // Just jump to runtime to add the two strings.
+ __ bind(&string_add_runtime);
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+
+ if (call_builtin.is_linked()) {
+ __ bind(&call_builtin);
+ __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
+ }
+}
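
The make_two_character_string path works because both result characters are already packed into a single halfword, so one halfword store fills both character slots at once on a little-endian processor, as the comment notes. A host-side sketch of the packing (assumes a little-endian host; names are illustrative):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      unsigned char first = 'h', second = 'i';
      // Pack the two ASCII characters into one halfword, low byte first.
      uint16_t packed = static_cast<uint16_t>(first | (second << 8));
      char result[2];
      std::memcpy(result, &packed, 2);  // One halfword store writes both chars.
      assert(result[0] == 'h' && result[1] == 'i');  // Holds on little-endian.
      return 0;
    }
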
+
+
+void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
+ int stack_offset,
+ Register arg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* slow) {
+ // First check if the argument is already a string.
+ Label not_string, done;
+ __ JumpIfSmi(arg, ¬_string);
+ __ GetObjectType(arg, scratch1, scratch1);
+ __ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));
+
+ // Check the number to string cache.
+ Label not_cached;
+ __ bind(¬_string);
+ // Puts the cached result into scratch1.
+ NumberToStringStub::GenerateLookupNumberStringCache(masm,
+ arg,
+ scratch1,
+ scratch2,
+ scratch3,
+ scratch4,
+ false,
+ ¬_cached);
+ __ mov(arg, scratch1);
+ __ sw(arg, MemOperand(sp, stack_offset));
+ __ jmp(&done);
+
+ // Check if the argument is a safe string wrapper.
+ __ bind(¬_cached);
+ __ JumpIfSmi(arg, slow);
+ __ GetObjectType(arg, scratch1, scratch2); // map -> scratch1.
+ __ Branch(slow, ne, scratch2, Operand(JS_VALUE_TYPE));
+ __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
+ __ li(scratch4, 1 << Map::kStringWrapperSafeForDefaultValueOf);
+ __ And(scratch2, scratch2, scratch4);
+ __ Branch(slow, ne, scratch2, Operand(scratch4));
+ __ lw(arg, FieldMemOperand(arg, JSValue::kValueOffset));
+ __ sw(arg, MemOperand(sp, stack_offset));
+
+ __ bind(&done);
}
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(state_ == CompareIC::SMIS);
+ Label miss;
+ __ Or(a2, a1, a0);
+ __ JumpIfNotSmi(a2, &miss);
+
+ if (GetCondition() == eq) {
+ // For equality we do not care about the sign of the result.
+ __ Subu(v0, a0, a1);
+ } else {
+ // Untag before subtracting to avoid handling overflow.
+ __ SmiUntag(a1);
+ __ SmiUntag(a0);
+ __ Subu(v0, a1, a0);
+ }
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
}
void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+
+ Label generic_stub;
+ Label unordered;
+ Label miss;
+ __ And(a2, a1, Operand(a0));
+ __ JumpIfSmi(a2, &generic_stub);
+
+ __ GetObjectType(a0, a2, a2);
+ __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
+
+ // Inlining the double comparison and falling back to the general compare
+ // stub if NaN is involved or FPU is unsupported.
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+
+ // Load left and right operand.
+ __ Subu(a2, a1, Operand(kHeapObjectTag));
+ __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
+ __ Subu(a2, a0, Operand(kHeapObjectTag));
+ __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
+
+ Label fpu_eq, fpu_lt, fpu_gt;
+ // Compare operands (test if unordered).
+ __ c(UN, D, f0, f2);
+ // Don't base result on status bits when a NaN is involved.
+ __ bc1t(&unordered);
+ __ nop();
+
+ // Test if equal.
+ __ c(EQ, D, f0, f2);
+ __ bc1t(&fpu_eq);
+ __ nop();
+
+ // Test if unordered or less (unordered case is already handled).
+ __ c(ULT, D, f0, f2);
+ __ bc1t(&fpu_lt);
+ __ nop();
+
+ // Otherwise it's greater.
+ __ bc1f(&fpu_gt);
+ __ nop();
+
+ // Return a result of -1, 0, or 1.
+ __ bind(&fpu_eq);
+ __ li(v0, Operand(EQUAL));
+ __ Ret();
+
+ __ bind(&fpu_lt);
+ __ li(v0, Operand(LESS));
+ __ Ret();
+
+ __ bind(&fpu_gt);
+ __ li(v0, Operand(GREATER));
+ __ Ret();
+
+ __ bind(&unordered);
+ }
+
+ CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
+ __ bind(&generic_stub);
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
}
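
The FPU path above is a standard NaN-aware three-way compare: the unordered test comes first so a NaN never reaches the equality and less-than tests, and unordered operands fall through to the generic stub. The same decision tree in C++ (an illustrative sketch, not V8's code):

    #include <cmath>

    // -1/0/1 for ordered operands; 'unordered_result' when either is NaN,
    // mirroring the stub's bail-out to the generic compare.
    int DoubleCompareSketch(double left, double right, int unordered_result) {
      if (std::isnan(left) || std::isnan(right)) return unordered_result;
      if (left == right) return 0;   // EQUAL
      if (left < right) return -1;   // LESS
      return 1;                      // GREATER
    }
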
void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
- }
+ ASSERT(state_ == CompareIC::SYMBOLS);
+ Label miss;
+
+ // Registers containing left and right operands respectively.
+ Register left = a1;
+ Register right = a0;
+ Register tmp1 = a2;
+ Register tmp2 = a3;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(left, right, &miss);
+
+ // Check that both operands are symbols.
+ __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ And(tmp1, tmp1, Operand(tmp2));
+ __ And(tmp1, tmp1, kIsSymbolMask);
+ __ Branch(&miss, eq, tmp1, Operand(zero_reg));
+ // Make sure a0 is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(a0));
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ mov(v0, right);
+ // Symbols are compared by identity.
+ __ Ret(ne, left, Operand(right));
+ __ li(v0, Operand(Smi::FromInt(EQUAL)));
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(state_ == CompareIC::STRINGS);
+ Label miss;
+
+ // Registers containing left and right operands respectively.
+ Register left = a1;
+ Register right = a0;
+ Register tmp1 = a2;
+ Register tmp2 = a3;
+ Register tmp3 = t0;
+ Register tmp4 = t1;
+ Register tmp5 = t2;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(left, right, &miss);
+
+ // Check that both operands are strings. This leaves the instance
+ // types loaded in tmp1 and tmp2.
+ __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kNotStringTag != 0);
+ __ Or(tmp3, tmp1, tmp2);
+ __ And(tmp5, tmp3, Operand(kIsNotStringMask));
+ __ Branch(&miss, ne, tmp5, Operand(zero_reg));
+
+ // Fast check for identical strings.
+ Label left_ne_right;
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Branch(&left_ne_right, ne, left, Operand(right), USE_DELAY_SLOT);
+ __ mov(v0, zero_reg); // In the delay slot.
+ __ Ret();
+ __ bind(&left_ne_right);
+
+ // Handle not identical strings.
+
+ // Check that both strings are symbols. If they are, we're done
+ // because we already know they are not identical.
+ ASSERT(GetCondition() == eq);
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ And(tmp3, tmp1, Operand(tmp2));
+ __ And(tmp5, tmp3, Operand(kIsSymbolMask));
+ Label is_symbol;
+ __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg), USE_DELAY_SLOT);
+ __ mov(v0, a0); // In the delay slot.
+ // Make sure a0 is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(a0));
+ __ Ret();
+ __ bind(&is_symbol);
+
+ // Check that both strings are sequential ASCII.
+ Label runtime;
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(tmp1, tmp2, tmp3, tmp4,
+ &runtime);
+
+ // Compare flat ASCII strings. Returns when done.
+ StringCompareStub::GenerateFlatAsciiStringEquals(
+ masm, left, right, tmp1, tmp2, tmp3);
+
+ // Handle more complex cases in runtime.
+ __ bind(&runtime);
+ __ Push(left, right);
+ __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
}
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(state_ == CompareIC::OBJECTS);
+ Label miss;
+ __ And(a2, a1, Operand(a0));
+ __ JumpIfSmi(a2, &miss);
+
+ __ GetObjectType(a0, a2, a2);
+ __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
+
+ ASSERT(GetCondition() == eq);
+ __ Subu(v0, a0, Operand(a1));
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
}
void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ __ Push(a1, a0);
+ __ push(ra);
+
+ // Call the runtime system in a fresh internal frame.
+ ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
+ masm->isolate());
+ __ EnterInternalFrame();
+ __ Push(a1, a0);
+ __ li(t0, Operand(Smi::FromInt(op_)));
+ __ push(t0);
+ __ CallExternalReference(miss, 3);
+ __ LeaveInternalFrame();
+ // Compute the entry point of the rewritten stub.
+ __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Restore registers.
+ __ pop(ra);
+ __ pop(a0);
+ __ pop(a1);
+ __ Jump(a2);
+}
+
+void DirectCEntryStub::Generate(MacroAssembler* masm) {
+  // No need to pop or drop anything; LeaveExitFrame will restore the old
+ // stack, thus dropping the allocated space for the return value.
+ // The saved ra is after the reserved stack space for the 4 args.
+ __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
+
+ if (FLAG_debug_code && EnableSlowAsserts()) {
+ // In case of an error the return address may point to a memory area
+ // filled with kZapValue by the GC.
+ // Dereference the address and check for this.
+ __ lw(t0, MemOperand(t9));
+ __ Assert(ne, "Received invalid return address.", t0,
+ Operand(reinterpret_cast<uint32_t>(kZapValue)));
+ }
+ __ Jump(t9);
}
-void GenerateFastPixelArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements_map,
- Register elements,
- Register scratch1,
- Register scratch2,
- Register result,
- Label* not_pixel_array,
- Label* key_not_smi,
- Label* out_of_range) {
- UNIMPLEMENTED_MIPS();
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
+ ExternalReference function) {
+ __ li(t9, Operand(function));
+ this->GenerateCall(masm, t9);
+}
+
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
+ Register target) {
+ __ Move(t9, target);
+ __ AssertStackIsAligned();
+ // Allocate space for arg slots.
+ __ Subu(sp, sp, kCArgsSlotsSize);
+
+ // Block the trampoline pool through the whole function to make sure the
+ // number of generated instructions is constant.
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
+
+ // We need to get the current 'pc' value, which is not available on MIPS.
+ Label find_ra;
+ masm->bal(&find_ra); // ra = pc + 8.
+ masm->nop(); // Branch delay slot nop.
+ masm->bind(&find_ra);
+
+ const int kNumInstructionsToJump = 6;
+ masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
+ // Push return address (accessible to GC through exit frame pc).
+ // This spot for ra was reserved in EnterExitFrame.
+ masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
+ masm->li(ra, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
+ RelocInfo::CODE_TARGET), true);
+ // Call the function.
+ masm->Jump(t9);
+ // Make sure the stored 'ra' points to this position.
+ ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
+}
+
+
+MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
+ MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ String* name,
+ Register scratch0) {
+  // If names of slots in range from 1 to kProbes - 1 for the hash value are
+ // not equal to the name and kProbes-th slot is not used (its name is the
+ // undefined value), it guarantees the hash table doesn't contain the
+ // property. It's true even if some slots represent deleted properties
+ // (their names are the null value).
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // scratch0 points to properties hash.
+ // Compute the masked index: (hash + i + i * i) & mask.
+ Register index = scratch0;
+    // Capacity is a smi, 2^n.
+ __ lw(index, FieldMemOperand(properties, kCapacityOffset));
+ __ Subu(index, index, Operand(1));
+ __ And(index, index, Operand(
+ Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ // index *= 3.
+ __ mov(at, index);
+ __ sll(index, index, 1);
+ __ Addu(index, index, at);
+
+ Register entity_name = scratch0;
+ // Having undefined at this place means the name is not contained.
+ ASSERT_EQ(kSmiTagSize, 1);
+ Register tmp = properties;
+
+ __ sll(scratch0, index, 1);
+ __ Addu(tmp, properties, scratch0);
+ __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
+
+ ASSERT(!tmp.is(entity_name));
+ __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
+ __ Branch(done, eq, entity_name, Operand(tmp));
+
+ if (i != kInlinedProbes - 1) {
+ // Stop if found the property.
+ __ Branch(miss, eq, entity_name, Operand(Handle<String>(name)));
+
+ // Check if the entry name is not a symbol.
+ __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+ __ lbu(entity_name,
+ FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+ __ And(scratch0, entity_name, Operand(kIsSymbolMask));
+ __ Branch(miss, eq, scratch0, Operand(zero_reg));
+
+ // Restore the properties.
+ __ lw(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ }
+ }
+
+ const int spill_mask =
+ (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
+ a2.bit() | a1.bit() | a0.bit());
+
+ __ MultiPush(spill_mask);
+ __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ li(a1, Operand(Handle<String>(name)));
+ StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
+ MaybeObject* result = masm->TryCallStub(&stub);
+ if (result->IsFailure()) return result;
+ __ MultiPop(spill_mask);
+
+ __ Branch(done, eq, v0, Operand(zero_reg));
+ __ Branch(miss, ne, v0, Operand(zero_reg));
+ return result;
+}
+
+
+// Probe the string dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found. Jump to
+// the |miss| label otherwise.
+// If lookup was successful |scratch2| will be equal to elements + 4 * index.
+void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register scratch1,
+ Register scratch2) {
+ // Assert that name contains a string.
+ if (FLAG_debug_code) __ AbortIfNotString(name);
+
+ // Compute the capacity mask.
+ __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
+ __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int
+ __ Subu(scratch1, scratch1, Operand(1));
+
+ // Generate an unrolled loop that performs a few probes before
+ // giving up. Measurements done on Gmail indicate that 2 probes
+ // cover ~93% of loads from dictionaries.
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ __ lw(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
+ if (i > 0) {
+      // Add the probe offset (i + i * i) left shifted to avoid right shifting
+      // the hash in a separate instruction. The value hash + i + i * i is
+      // right shifted in the srl/And sequence that follows.
+ ASSERT(StringDictionary::GetProbeOffset(i) <
+ 1 << (32 - String::kHashFieldOffset));
+ __ Addu(scratch2, scratch2, Operand(
+ StringDictionary::GetProbeOffset(i) << String::kHashShift));
+ }
+ __ srl(scratch2, scratch2, String::kHashShift);
+ __ And(scratch2, scratch1, scratch2);
+
+ // Scale the index by multiplying by the element size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ // scratch2 = scratch2 * 3.
+
+ __ mov(at, scratch2);
+ __ sll(scratch2, scratch2, 1);
+ __ Addu(scratch2, scratch2, at);
+
+ // Check if the key is identical to the name.
+ __ sll(at, scratch2, 2);
+ __ Addu(scratch2, elements, at);
+ __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
+ __ Branch(done, eq, name, Operand(at));
+ }
+
+ const int spill_mask =
+ (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
+ a3.bit() | a2.bit() | a1.bit() | a0.bit()) &
+ ~(scratch1.bit() | scratch2.bit());
+
+ __ MultiPush(spill_mask);
+ __ Move(a0, elements);
+ __ Move(a1, name);
+ StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
+ __ CallStub(&stub);
+ __ mov(scratch2, a2);
+ __ MultiPop(spill_mask);
+
+ __ Branch(done, ne, v0, Operand(zero_reg));
+ __ Branch(miss, eq, v0, Operand(zero_reg));
+}
+
+
+void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // Registers:
+  //  result: holds the result of the lookup (v0).
+  //  dictionary: StringDictionary to probe (a0).
+  //  key: the name to look up (a1).
+  //  index: will hold the index of the entry if the lookup is successful
+  //         (a2); may alias with result.
+  // Returns:
+  //  result is zero if the lookup failed, non-zero otherwise.
+
+ Register result = v0;
+ Register dictionary = a0;
+ Register key = a1;
+ Register index = a2;
+ Register mask = a3;
+ Register hash = t0;
+ Register undefined = t1;
+ Register entry_key = t2;
+
+ Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
+
+ __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
+ __ sra(mask, mask, kSmiTagSize);
+ __ Subu(mask, mask, Operand(1));
+
+ __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));
+
+ __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+ for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+    // Capacity is a smi, 2^n.
+ if (i > 0) {
+      // Add the probe offset (i + i * i) left shifted to avoid right shifting
+      // the hash in a separate instruction. The value hash + i + i * i is
+      // right shifted in the srl/And sequence that follows.
+ ASSERT(StringDictionary::GetProbeOffset(i) <
+ 1 << (32 - String::kHashFieldOffset));
+ __ Addu(index, hash, Operand(
+ StringDictionary::GetProbeOffset(i) << String::kHashShift));
+ } else {
+ __ mov(index, hash);
+ }
+ __ srl(index, index, String::kHashShift);
+ __ And(index, mask, index);
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ // index *= 3.
+ __ mov(at, index);
+ __ sll(index, index, 1);
+ __ Addu(index, index, at);
+
+ ASSERT_EQ(kSmiTagSize, 1);
+ __ sll(index, index, 2);
+ __ Addu(index, index, dictionary);
+ __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
+
+ // Having undefined at this place means the name is not contained.
+ __ Branch(¬_in_dictionary, eq, entry_key, Operand(undefined));
+
+ // Stop if found the property.
+ __ Branch(&in_dictionary, eq, entry_key, Operand(key));
+
+ if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+ // Check if the entry name is not a symbol.
+ __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
+ __ lbu(entry_key,
+ FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
+ __ And(result, entry_key, Operand(kIsSymbolMask));
+ __ Branch(&maybe_in_dictionary, eq, result, Operand(zero_reg));
+ }
+ }
+
+ __ bind(&maybe_in_dictionary);
+ // If we are doing negative lookup then probing failure should be
+ // treated as a lookup success. For positive lookup probing failure
+ // should be treated as lookup failure.
+ if (mode_ == POSITIVE_LOOKUP) {
+ __ mov(result, zero_reg);
+ __ Ret();
+ }
+
+ __ bind(&in_dictionary);
+ __ li(result, 1);
+ __ Ret();
+
+ __ bind(¬_in_dictionary);
+ __ mov(result, zero_reg);
+ __ Ret();
}
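
Both lookup paths probe with the quadratic sequence the comments describe: the i-th probe inspects entry (hash + i + i * i) & mask, and each entry occupies kEntrySize == 3 array slots (key, value, details). A sketch of the index arithmetic; the probe-offset helper is named here for illustration only, and the real offset function lives in the dictionary implementation:

    #include <cstdint>

    // Illustrative probe-offset helper, matching the comments above.
    uint32_t ProbeOffsetSketch(uint32_t i) { return i + i * i; }

    // Slot index of the i-th probe for 'hash' in a table of capacity mask + 1.
    uint32_t ProbeSlotSketch(uint32_t hash, uint32_t i, uint32_t mask) {
      const uint32_t kEntrySize = 3;  // key, value, details.
      uint32_t entry = (hash + ProbeOffsetSketch(i)) & mask;
      return entry * kEntrySize;      // First of the entry's three slots.
    }
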
diff --git a/src/mips/code-stubs-mips.h b/src/mips/code-stubs-mips.h
index d1307b5..3f0b150 100644
--- a/src/mips/code-stubs-mips.h
+++ b/src/mips/code-stubs-mips.h
@@ -456,6 +456,27 @@
const char* GetName() { return "RegExpCEntryStub"; }
};
+// Trampoline stub to call into native code. To call safely into native code
+// in the presence of compacting GC (which can move code objects) we need to
+// keep the code which called into native pinned in the memory. Currently the
+// simplest approach is to generate such stub early enough so it can never be
+// moved by GC
+class DirectCEntryStub: public CodeStub {
+ public:
+ DirectCEntryStub() {}
+ void Generate(MacroAssembler* masm);
+ void GenerateCall(MacroAssembler* masm,
+ ExternalReference function);
+ void GenerateCall(MacroAssembler* masm, Register target);
+
+ private:
+ Major MajorKey() { return DirectCEntry; }
+ int MinorKey() { return 0; }
+
+ bool NeedsImmovableCode() { return true; }
+
+ const char* GetName() { return "DirectCEntryStub"; }
+};
class FloatingPointHelper : public AllStatic {
public:
@@ -608,13 +629,14 @@
void Generate(MacroAssembler* masm);
- static void GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- String* name,
- Register scratch0) ;
+ MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
+ MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ String* name,
+ Register scratch0);
static void GeneratePositiveLookup(MacroAssembler* masm,
Label* miss,
diff --git a/src/mips/debug-mips.cc b/src/mips/debug-mips.cc
index 1b3e27a..ee7a93b 100644
--- a/src/mips/debug-mips.cc
+++ b/src/mips/debug-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -40,106 +40,259 @@
#ifdef ENABLE_DEBUGGER_SUPPORT
bool BreakLocationIterator::IsDebugBreakAtReturn() {
- UNIMPLEMENTED_MIPS();
- return false;
+ return Debug::IsDebugBreakAtReturn(rinfo());
}
void BreakLocationIterator::SetDebugBreakAtReturn() {
- UNIMPLEMENTED_MIPS();
+  // MIPS return sequence:
+ // mov sp, fp
+ // lw fp, sp(0)
+ // lw ra, sp(4)
+ // addiu sp, sp, 8
+ // addiu sp, sp, N
+ // jr ra
+ // nop (in branch delay slot)
+
+  // Make sure this constant matches the number of instructions we emit.
+ ASSERT(Assembler::kJSReturnSequenceInstructions == 7);
+ CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
+ // li and Call pseudo-instructions emit two instructions each.
+ patcher.masm()->li(v8::internal::t9,
+ Operand(reinterpret_cast<int32_t>(
+ Isolate::Current()->debug()->debug_break_return()->entry())));
+ patcher.masm()->Call(v8::internal::t9);
+ patcher.masm()->nop();
+ patcher.masm()->nop();
+ patcher.masm()->nop();
+
+ // TODO(mips): Open issue about using breakpoint instruction instead of nops.
+ // patcher.masm()->bkpt(0);
}
// Restore the JS frame exit code.
void BreakLocationIterator::ClearDebugBreakAtReturn() {
- UNIMPLEMENTED_MIPS();
+ rinfo()->PatchCode(original_rinfo()->pc(),
+ Assembler::kJSReturnSequenceInstructions);
}
// A debug break in the exit code is identified by the JS frame exit code
-// having been patched with li/call psuedo-instrunction (liu/ori/jalr)
+// having been patched with a li/call pseudo-instruction pair (lui/ori/jalr).
bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
- UNIMPLEMENTED_MIPS();
- return false;
+ ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+ return rinfo->IsPatchedReturnSequence();
}
bool BreakLocationIterator::IsDebugBreakAtSlot() {
- UNIMPLEMENTED_MIPS();
- return false;
+ ASSERT(IsDebugBreakSlot());
+ // Check whether the debug break slot instructions have been patched.
+ return rinfo()->IsPatchedDebugBreakSlotSequence();
}
void BreakLocationIterator::SetDebugBreakAtSlot() {
- UNIMPLEMENTED_MIPS();
+ ASSERT(IsDebugBreakSlot());
+ // Patch the code changing the debug break slot code from:
+ // nop(DEBUG_BREAK_NOP) - nop(1) is sll(zero_reg, zero_reg, 1)
+ // nop(DEBUG_BREAK_NOP)
+ // nop(DEBUG_BREAK_NOP)
+ // nop(DEBUG_BREAK_NOP)
+ // to a call to the debug break slot code.
+ // li t9, address (lui t9 / ori t9 instruction pair)
+ // call t9 (jalr t9 / nop instruction pair)
+ CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
+ patcher.masm()->li(v8::internal::t9, Operand(reinterpret_cast<int32_t>(
+ Isolate::Current()->debug()->debug_break_return()->entry())));
+ patcher.masm()->Call(v8::internal::t9);
}
void BreakLocationIterator::ClearDebugBreakAtSlot() {
- UNIMPLEMENTED_MIPS();
+ ASSERT(IsDebugBreakSlot());
+ rinfo()->PatchCode(original_rinfo()->pc(),
+ Assembler::kDebugBreakSlotInstructions);
}
#define __ ACCESS_MASM(masm)
+
+static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
+ RegList object_regs,
+ RegList non_object_regs) {
+ __ EnterInternalFrame();
+
+  // Store the registers containing live values on the expression stack to
+  // make sure that these are correctly updated during GC. Non-object values
+  // are stored as smis, which leaves them untouched by the GC.
+ ASSERT((object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ if ((object_regs | non_object_regs) != 0) {
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((non_object_regs & (1 << r)) != 0) {
+ if (FLAG_debug_code) {
+ __ And(at, reg, 0xc0000000);
+ __ Assert(eq, "Unable to encode value as smi", at, Operand(zero_reg));
+ }
+ __ sll(reg, reg, kSmiTagSize);
+ }
+ }
+ __ MultiPush(object_regs | non_object_regs);
+ }
+
+#ifdef DEBUG
+ __ RecordComment("// Calling from debug break to runtime - come in - over");
+#endif
+ __ mov(a0, zero_reg); // No arguments.
+ __ li(a1, Operand(ExternalReference::debug_break(masm->isolate())));
+
+ CEntryStub ceb(1);
+ __ CallStub(&ceb);
+
+ // Restore the register values from the expression stack.
+ if ((object_regs | non_object_regs) != 0) {
+ __ MultiPop(object_regs | non_object_regs);
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ srl(reg, reg, kSmiTagSize);
+ }
+ if (FLAG_debug_code &&
+          (((object_regs | non_object_regs) & (1 << r)) == 0)) {
+ __ li(reg, kDebugZapValue);
+ }
+ }
+ }
+
+ __ LeaveInternalFrame();
+
+ // Now that the break point has been handled, resume normal execution by
+ // jumping to the target address intended by the caller and that was
+ // overwritten by the address of DebugBreakXXX.
+ __ li(t9, Operand(
+ ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate())));
+ __ lw(t9, MemOperand(t9));
+ __ Jump(t9);
+}
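
Generate_DebugBreakCallHelper keeps the GC safe by pushing non-object register values in smi form: shifting a value left by the smi tag size makes it look like a smi, which the collector never relocates, and the debug-mode assert that the top two bits are clear guarantees the shift loses nothing. A sketch of the round trip (helper names are ours, not V8's):

    #include <cassert>
    #include <cstdint>

    const int kSmiTagSize = 1;  // A clear low bit marks a smi.

    // Tag a raw value as a smi before exposing it to the GC on the stack.
    uint32_t TagForGcSketch(uint32_t raw) {
      assert((raw & 0xc0000000) == 0);  // Mirrors the stub's encodability check.
      return raw << kSmiTagSize;
    }

    // Recover the raw value after GC has (not) touched the pushed smi.
    uint32_t UntagAfterGcSketch(uint32_t smi) { return smi >> kSmiTagSize; }
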
+
+
void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // Calling convention for IC load (from ic-mips.cc).
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- a0 : receiver
+ // -- [sp] : receiver
+ // -----------------------------------
+ // Registers a0 and a2 contain objects that need to be pushed on the
+ // expression stack of the fake JS frame.
+ Generate_DebugBreakCallHelper(masm, a0.bit() | a2.bit(), 0);
}
void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // Calling convention for IC store (from ic-mips.cc).
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ // Registers a0, a1, and a2 contain objects that need to be pushed on the
+ // expression stack of the fake JS frame.
+ Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit() | a2.bit(), 0);
}
void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit(), 0);
}
void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit() | a2.bit(), 0);
}
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // Calling convention for IC call (from ic-mips.cc).
+ // ----------- S t a t e -------------
+ // -- a2: name
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, a2.bit(), 0);
}
void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // Calling convention for construct call (from builtins-mips.cc).
+ // -- a0 : number of arguments (not smi)
+ // -- a1 : constructor function
+ Generate_DebugBreakCallHelper(masm, a1.bit(), a0.bit());
}
void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // In places other than IC call sites it is expected that v0 is TOS which
+ // is an object - this is not generally the case so this should be used with
+ // care.
+ Generate_DebugBreakCallHelper(masm, v0.bit(), 0);
}
void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // No registers used on entry.
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, 0, 0);
}
void Debug::GenerateSlot(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+  // Generate enough nops to make space for a call instruction. Avoid emitting
+  // the trampoline pool in the debug break slot code.
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
+ Label check_codesize;
+ __ bind(&check_codesize);
+ __ RecordDebugBreakSlot();
+ for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
+ __ nop(MacroAssembler::DEBUG_BREAK_NOP);
+ }
+ ASSERT_EQ(Assembler::kDebugBreakSlotInstructions,
+ masm->InstructionsGeneratedSince(&check_codesize));
}
void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+  // In the places where a debug break slot is inserted, no registers can
+  // contain object pointers.
+ Generate_DebugBreakCallHelper(masm, 0, 0);
}
void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ masm->Abort("LiveEdit frame dropping is not supported on mips");
}
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ masm->Abort("LiveEdit frame dropping is not supported on mips");
}
diff --git a/src/mips/frames-mips.cc b/src/mips/frames-mips.cc
index f54202d..faaa0e0 100644
--- a/src/mips/frames-mips.cc
+++ b/src/mips/frames-mips.cc
@@ -38,12 +38,7 @@
Address ExitFrame::ComputeStackPointer(Address fp) {
- Address marker = Memory::Address_at(fp + ExitFrameConstants::kMarkerOffset);
- Address sp = fp + ExitFrameConstants::kSPOffset;
- if (marker == NULL) {
- sp -= FPURegister::kNumRegisters * kDoubleSize + 2 * kPointerSize;
- }
- return sp;
+ return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
}
diff --git a/src/mips/frames-mips.h b/src/mips/frames-mips.h
index 788bd4e..fd93e8b 100644
--- a/src/mips/frames-mips.h
+++ b/src/mips/frames-mips.h
@@ -101,22 +101,24 @@
class ExitFrameConstants : public AllStatic {
public:
- static const int kDebugMarkOffset = -1 * kPointerSize;
- // Must be the same as kDebugMarkOffset. Alias introduced when upgrading.
- static const int kCodeOffset = -1 * kPointerSize;
- static const int kSPOffset = -1 * kPointerSize;
+ // See some explanation in MacroAssembler::EnterExitFrame.
+ // This marks the top of the extra allocated stack space.
+ static const int kStackSpaceOffset = -3 * kPointerSize;
- // TODO(mips): Use a patched sp value on the stack instead.
- // A marker of 0 indicates that double registers are saved.
- static const int kMarkerOffset = -2 * kPointerSize;
+ static const int kCodeOffset = -2 * kPointerSize;
+
+ static const int kSPOffset = -1 * kPointerSize;
// The caller fields are below the frame pointer on the stack.
static const int kCallerFPOffset = +0 * kPointerSize;
// The calling JS function is between FP and PC.
static const int kCallerPCOffset = +1 * kPointerSize;
+ // MIPS-specific: a pointer to the old sp to avoid unnecessary calculations.
+ static const int kCallerSPOffset = +2 * kPointerSize;
+
// FP-relative displacement of the caller's SP.
- static const int kCallerSPDisplacement = +3 * kPointerSize;
+ static const int kCallerSPDisplacement = +2 * kPointerSize;
};
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index ce51450..c454d98 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -48,52 +48,765 @@
#define __ ACCESS_MASM(masm)
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
+ Register type,
+ Label* global_object) {
+ // Register usage:
+ // type: holds the receiver instance type on entry.
+ __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
+ __ Branch(global_object, eq, type, Operand(JS_BUILTINS_OBJECT_TYPE));
+ __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
+}
+
+
+// Generated code falls through if the receiver is a regular non-global
+// JS object with slow properties and no interceptors.
+static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register elements,
+ Register scratch0,
+ Register scratch1,
+ Label* miss) {
+ // Register usage:
+ // receiver: holds the receiver on entry and is unchanged.
+ // elements: holds the property dictionary on fall through.
+ // Scratch registers:
+  //   scratch0: used to hold the receiver map.
+  //   scratch1: used to hold the receiver instance type, receiver bit mask
+  //             and elements map.
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss);
+
+ // Check that the receiver is a valid JS object.
+ __ GetObjectType(receiver, scratch0, scratch1);
+ __ Branch(miss, lt, scratch1, Operand(FIRST_JS_OBJECT_TYPE));
+
+  // If this assert fails, we have to check the upper bound too.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+
+ GenerateGlobalInstanceTypeCheck(masm, scratch1, miss);
+
+ // Check that the global object does not require access checks.
+ __ lbu(scratch1, FieldMemOperand(scratch0, Map::kBitFieldOffset));
+ __ And(scratch1, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
+ (1 << Map::kHasNamedInterceptor)));
+ __ Branch(miss, ne, scratch1, Operand(zero_reg));
+
+ __ lw(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(scratch0, Heap::kHashTableMapRootIndex);
+ __ Branch(miss, ne, scratch1, Operand(scratch0));
+}
+
+
+// Helper function used from LoadIC/CallIC GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
+// name: Property name. It is not clobbered if a jump to the miss label is
+//       done.
+// result: Register for the result. It is only updated if a jump to the miss
+// label is not done. Can be the same as elements or name clobbering
+// one of these in the case of not jumping to the miss label.
+// The two scratch registers need to be different from elements, name and
+// result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+// The address returned from GenerateStringDictionaryProbes() in scratch2
+// is used.
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+ Label* miss,
+ Register elements,
+ Register name,
+ Register result,
+ Register scratch1,
+ Register scratch2) {
+ // Main use of the scratch registers.
+ // scratch1: Used as temporary and to hold the capacity of the property
+ // dictionary.
+ // scratch2: Used as temporary.
+ Label done;
+
+ // Probe the dictionary.
+ StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss,
+ &done,
+ elements,
+ name,
+ scratch1,
+ scratch2);
+
+ // If probing finds an entry check that the value is a normal
+ // property.
+ __ bind(&done); // scratch2 == elements + 4 * index.
+ const int kElementsStartOffset = StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+ __ And(at,
+ scratch1,
+ Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
+ __ Branch(miss, ne, at, Operand(zero_reg));
+
+ // Get the value at the masked, scaled index and return.
+ __ lw(result,
+ FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
+}
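
The offsets above follow from the dictionary's flat entry layout: each entry is three pointers wide, key first, then value, then details, so the details word sits two pointers past the entry start and the value one pointer past it. An illustrative layout sketch (field names are descriptive, not V8's):

    // Illustrative view of one StringDictionary entry (kEntrySize == 3) as
    // the loads above address it.
    struct DictionaryEntrySketch {
      void* key;      // entry + 0 * kPointerSize: the property name.
      void* value;    // entry + 1 * kPointerSize: what the load returns.
      void* details;  // entry + 2 * kPointerSize: smi with type/attribute bits.
    };
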
+
+
+// Helper function used from StoreIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
+// name: Property name. It is not clobbered if a jump to the miss label is
+//       done.
+// value: The value to store.
+// The two scratch registers need to be different from elements, name and
+// result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+// The address returned from GenerateStringDictionaryProbes() in scratch2
+// is used.
+static void GenerateDictionaryStore(MacroAssembler* masm,
+ Label* miss,
+ Register elements,
+ Register name,
+ Register value,
+ Register scratch1,
+ Register scratch2) {
+ // Main use of the scratch registers.
+ // scratch1: Used as temporary and to hold the capacity of the property
+ // dictionary.
+ // scratch2: Used as temporary.
+ Label done;
+
+ // Probe the dictionary.
+ StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss,
+ &done,
+ elements,
+ name,
+ scratch1,
+ scratch2);
+
+ // If probing finds an entry in the dictionary check that the value
+ // is a normal property that is not read only.
+ __ bind(&done); // scratch2 == elements + 4 * index.
+ const int kElementsStartOffset = StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ const int kTypeAndReadOnlyMask
+ = (PropertyDetails::TypeField::mask() |
+ PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
+ __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+ __ And(at, scratch1, Operand(kTypeAndReadOnlyMask));
+ __ Branch(miss, ne, at, Operand(zero_reg));
+
+ // Store the value at the masked, scaled index and return.
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ Addu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
+ __ sw(value, MemOperand(scratch2));
+
+ // Update the write barrier. Make sure not to clobber the value.
+ __ mov(scratch1, value);
+ __ RecordWrite(elements, scratch2, scratch1);
+}
+
+
+static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
+ Label* miss,
+ Register elements,
+ Register key,
+ Register result,
+ Register reg0,
+ Register reg1,
+ Register reg2) {
+ // Register use:
+ //
+ // elements - holds the slow-case elements of the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+  //
+ // result - holds the result on exit if the load succeeded.
+  //            Allowed to be the same as 'elements' or 'key'.
+  //            Unchanged on bailout so 'elements' or 'key' can be used
+  //            in further computation.
+ //
+ // Scratch registers:
+ //
+ // reg0 - holds the untagged key on entry and holds the hash once computed.
+ //
+ // reg1 - Used to hold the capacity mask of the dictionary.
+ //
+ // reg2 - Used for the index into the dictionary.
+ // at - Temporary (avoid MacroAssembler instructions also using 'at').
+ Label done;
+
+ // Compute the hash code from the untagged key. This must be kept in sync
+ // with ComputeIntegerHash in utils.h.
+ //
+ // hash = ~hash + (hash << 15);
+ __ nor(reg1, reg0, zero_reg);
+ __ sll(at, reg0, 15);
+ __ addu(reg0, reg1, at);
+
+ // hash = hash ^ (hash >> 12);
+ __ srl(at, reg0, 12);
+ __ xor_(reg0, reg0, at);
+
+ // hash = hash + (hash << 2);
+ __ sll(at, reg0, 2);
+ __ addu(reg0, reg0, at);
+
+ // hash = hash ^ (hash >> 4);
+ __ srl(at, reg0, 4);
+ __ xor_(reg0, reg0, at);
+
+ // hash = hash * 2057;
+ __ li(reg1, Operand(2057));
+ __ mul(reg0, reg0, reg1);
+
+ // hash = hash ^ (hash >> 16);
+ __ srl(at, reg0, 16);
+ __ xor_(reg0, reg0, at);
+
+ // Compute the capacity mask.
+ __ lw(reg1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
+ __ sra(reg1, reg1, kSmiTagSize);
+ __ Subu(reg1, reg1, Operand(1));
+
+ // Generate an unrolled loop that performs a few probes before giving up.
+ static const int kProbes = 4;
+ for (int i = 0; i < kProbes; i++) {
+ // Use reg2 for index calculations and keep the hash intact in reg0.
+ __ mov(reg2, reg0);
+ // Compute the masked index: (hash + i + i * i) & mask.
+ if (i > 0) {
+ __ Addu(reg2, reg2, Operand(NumberDictionary::GetProbeOffset(i)));
+ }
+ __ and_(reg2, reg2, reg1);
+
+ // Scale the index by multiplying by the element size.
+ ASSERT(NumberDictionary::kEntrySize == 3);
+ __ sll(at, reg2, 1); // 2x.
+ __ addu(reg2, reg2, at); // reg2 = reg2 * 3.
+
+ // Check if the key is identical to the name.
+ __ sll(at, reg2, kPointerSizeLog2);
+ __ addu(reg2, elements, at);
+
+ __ lw(at, FieldMemOperand(reg2, NumberDictionary::kElementsStartOffset));
+ if (i != kProbes - 1) {
+ __ Branch(&done, eq, key, Operand(at));
+ } else {
+ __ Branch(miss, ne, key, Operand(at));
+ }
+ }
+
+ __ bind(&done);
+ // Check that the value is a normal property.
+ // reg2: elements + (index * kPointerSize).
+ const int kDetailsOffset =
+ NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+ __ lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
+ __ And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
+ __ Branch(miss, ne, at, Operand(zero_reg));
+
+ // Get the value at the masked, scaled index and return.
+ const int kValueOffset =
+ NumberDictionary::kElementsStartOffset + kPointerSize;
+ __ lw(result, FieldMemOperand(reg2, kValueOffset));
+}
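
The hash sequence emitted above is a step-for-step transcription of the integer hash that, per the comment, must stay in sync with ComputeIntegerHash in utils.h. The same steps in plain C++ (a sketch of the shared function, transcribed from the comments rather than copied from utils.h):

    #include <cstdint>

    uint32_t ComputeIntegerHashSketch(uint32_t hash) {
      hash = ~hash + (hash << 15);  // The nor/sll/addu sequence above.
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;           // li 2057 / mul.
      hash = hash ^ (hash >> 16);
      return hash;
    }
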
+
+
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ Label miss;
+
+ StubCompiler::GenerateLoadArrayLength(masm, a0, a3, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ Label miss;
+
+ StubCompiler::GenerateLoadStringLength(masm, a0, a1, a3, &miss,
+ support_wrappers);
+ // Cache miss: Jump to runtime.
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ Label miss;
+
+ StubCompiler::GenerateLoadFunctionPrototype(masm, a0, a1, a3, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
+}
+
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS object.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register map,
+ Register scratch,
+ int interceptor_bit,
+ Label* slow) {
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(receiver, slow);
+ // Get the map of the receiver.
+ __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ // Check bit field.
+ __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ And(at, scratch,
+ Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
+ __ Branch(slow, ne, at, Operand(zero_reg));
+ // Check that the object is some kind of JS object EXCEPT JS Value type.
+ // In the case that the object is a value-wrapper object,
+ // we enter the runtime system to make sure that indexing into string
+ // objects works as intended.
+ ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+ __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
+}
+
+
+// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, doesn't perform the elements map check.
+static void GenerateFastArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements,
+ Register scratch1,
+ Register scratch2,
+ Register result,
+ Label* not_fast_array,
+ Label* out_of_range) {
+ // Register use:
+ //
+ // receiver - holds the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // elements - holds the elements of the receiver on exit.
+ //
+ // result - holds the result on exit if the load succeeded.
+ // Allowed to be the same as 'receiver' or 'key'.
+ // Unchanged on bailout so 'receiver' and 'key' can be safely
+ // used by further computation.
+ //
+ // Scratch registers:
+ //
+ // scratch1 - used to hold elements map and elements length.
+ // Holds the elements map if not_fast_array branch is taken.
+ //
+ // scratch2 - used to hold the loaded value.
+
+ __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ if (not_fast_array != NULL) {
+ // Check that the object is in fast mode (not dictionary).
+ __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+ __ Branch(not_fast_array, ne, scratch1, Operand(at));
+ } else {
+ __ AssertFastElements(elements);
+ }
+
+ // Check that the key (index) is within bounds.
+ __ lw(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Branch(out_of_range, hs, key, Operand(scratch1));
+
+ // Fast case: Do the load.
+ __ Addu(scratch1, elements,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ // The key is a smi.
+ ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
+ __ addu(at, at, scratch1);
+ __ lw(scratch2, MemOperand(at));
+
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ // In case the loaded value is the_hole we have to consult GetProperty
+ // to ensure the prototype chain is searched.
+ __ Branch(out_of_range, eq, scratch2, Operand(at));
+ __ mov(result, scratch2);
+}
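// ---- Editor's aside (not part of the diff) --------------------------------
// The fast-path address arithmetic above in plain C++. A smi stores its value
// shifted left by kSmiTagSize, so shifting the smi key left by
// kPointerSizeLog2 - kSmiTagSize scales the untagged value by the pointer
// size. The constants below assume a 32-bit MIPS target.
#include <stdint.h>

static inline int32_t SmiKeyToElementOffset(int32_t smi_key) {
  const int kPointerSizeLog2 = 2;  // 4-byte pointers.
  const int kSmiTagSize = 1;
  return smi_key << (kPointerSizeLog2 - kSmiTagSize);
}
// The slot loaded above is then
// elements + (FixedArray::kHeaderSize - kHeapObjectTag) + that offset.
// ----------------------------------------------------------------------------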
+
+
+// Checks whether a key is an array index string or a symbol string.
+// Falls through if the key is a symbol.
+static void GenerateKeyStringCheck(MacroAssembler* masm,
+ Register key,
+ Register map,
+ Register hash,
+ Label* index_string,
+ Label* not_symbol) {
+ // The key is not a smi.
+ // Is it a string?
+ __ GetObjectType(key, map, hash);
+ __ Branch(not_symbol, ge, hash, Operand(FIRST_NONSTRING_TYPE));
+
+ // Is the string an array index, with cached numeric value?
+ __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));
+ __ And(at, hash, Operand(String::kContainsCachedArrayIndexMask));
+ __ Branch(index_string, eq, at, Operand(zero_reg));
+
+ // Is the string a symbol?
+ // map: key map
+ __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ ASSERT(kSymbolTag != 0);
+ __ And(at, hash, Operand(kIsSymbolMask));
+ __ Branch(not_symbol, eq, at, Operand(zero_reg));
}
// Defined in ic.cc.
Object* CallIC_Miss(Arguments args);
+// The generated code does not accept smi keys.
+// The generated code falls through if both probes miss.
+static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+ int argc,
+ Code::Kind kind) {
+ // ----------- S t a t e -------------
+ // -- a1 : receiver
+ // -- a2 : name
+ // -----------------------------------
+ Label number, non_number, non_string, boolean, probe, miss;
+
+ // Probe the stub cache.
+ Code::Flags flags = Code::ComputeFlags(kind,
+ NOT_IN_LOOP,
+ MONOMORPHIC,
+ Code::kNoExtraICState,
+ NORMAL,
+ argc);
+ Isolate::Current()->stub_cache()->GenerateProbe(
+ masm, flags, a1, a2, a3, t0, t1);
+
+ // If the stub cache probing failed, the receiver might be a value.
+ // For value objects, we use the map of the prototype objects for
+ // the corresponding JSValue for the cache and that is what we need
+ // to probe.
+ //
+ // Check for number.
+ __ JumpIfSmi(a1, &number, t1);
+ __ GetObjectType(a1, a3, a3);
+ __ Branch(&non_number, ne, a3, Operand(HEAP_NUMBER_TYPE));
+ __ bind(&number);
+ StubCompiler::GenerateLoadGlobalFunctionPrototype(
+ masm, Context::NUMBER_FUNCTION_INDEX, a1);
+ __ Branch(&probe);
+
+ // Check for string.
+ __ bind(&non_number);
+ __ Branch(&non_string, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
+ StubCompiler::GenerateLoadGlobalFunctionPrototype(
+ masm, Context::STRING_FUNCTION_INDEX, a1);
+ __ Branch(&probe);
+
+ // Check for boolean.
+ __ bind(&non_string);
+ __ LoadRoot(t0, Heap::kTrueValueRootIndex);
+ __ Branch(&boolean, eq, a1, Operand(t0));
+ __ LoadRoot(t1, Heap::kFalseValueRootIndex);
+ __ Branch(&miss, ne, a1, Operand(t1));
+ __ bind(&boolean);
+ StubCompiler::GenerateLoadGlobalFunctionPrototype(
+ masm, Context::BOOLEAN_FUNCTION_INDEX, a1);
+
+ // Probe the stub cache for the value object.
+ __ bind(&probe);
+ Isolate::Current()->stub_cache()->GenerateProbe(
+ masm, flags, a1, a2, a3, t0, t1);
+
+ __ bind(&miss);
+}
+
+
+static void GenerateFunctionTailCall(MacroAssembler* masm,
+ int argc,
+ Label* miss,
+ Register scratch) {
+ // a1: function
+
+ // Check that the value isn't a smi.
+ __ JumpIfSmi(a1, miss);
+
+ // Check that the value is a JSFunction.
+ __ GetObjectType(a1, scratch, scratch);
+ __ Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
+
+ // Invoke the function.
+ ParameterCount actual(argc);
+ __ InvokeFunction(a1, actual, JUMP_FUNCTION);
+}
+
+
+static void GenerateCallNormal(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ // Get the receiver of the function from the stack into a1.
+ __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+ GenerateStringDictionaryReceiverCheck(masm, a1, a0, a3, t0, &miss);
+
+ // a0: elements
+ // Search the dictionary - put result in register a1.
+ GenerateDictionaryLoad(masm, &miss, a0, a2, a1, a3, t0);
+
+ GenerateFunctionTailCall(masm, argc, &miss, t0);
+
+ // Cache miss: Jump to runtime.
+ __ bind(&miss);
+}
+
+
+static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Isolate* isolate = masm->isolate();
+
+ if (id == IC::kCallIC_Miss) {
+ __ IncrementCounter(isolate->counters()->call_miss(), 1, a3, t0);
+ } else {
+ __ IncrementCounter(isolate->counters()->keyed_call_miss(), 1, a3, t0);
+ }
+
+ // Get the receiver of the function from the stack.
+ __ lw(a3, MemOperand(sp, argc * kPointerSize));
+
+ __ EnterInternalFrame();
+
+ // Push the receiver and the name of the function.
+ __ Push(a3, a2);
+
+ // Call the entry.
+ __ li(a0, Operand(2));
+ __ li(a1, Operand(ExternalReference(IC_Utility(id), isolate)));
+
+ CEntryStub stub(1);
+ __ CallStub(&stub);
+
+ // Move result to a1 and leave the internal frame.
+ __ mov(a1, v0);
+ __ LeaveInternalFrame();
+
+ // Check if the receiver is a global object of some sort.
+ // This can happen only for regular CallIC but not KeyedCallIC.
+ if (id == IC::kCallIC_Miss) {
+ Label invoke, global;
+ __ lw(a2, MemOperand(sp, argc * kPointerSize));
+ __ andi(t0, a2, kSmiTagMask);
+ __ Branch(&invoke, eq, t0, Operand(zero_reg));
+ __ GetObjectType(a2, a3, a3);
+ __ Branch(&global, eq, a3, Operand(JS_GLOBAL_OBJECT_TYPE));
+ __ Branch(&invoke, ne, a3, Operand(JS_BUILTINS_OBJECT_TYPE));
+
+ // Patch the receiver on the stack.
+ __ bind(&global);
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
+ __ sw(a2, MemOperand(sp, argc * kPointerSize));
+ __ bind(&invoke);
+ }
+ // Invoke the function.
+ ParameterCount actual(argc);
+ __ InvokeFunction(a1, actual, JUMP_FUNCTION);
+}
+
void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ GenerateCallMiss(masm, argc, IC::kCallIC_Miss);
}
void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ // Get the receiver of the function from the stack into a1.
+ __ lw(a1, MemOperand(sp, argc * kPointerSize));
+ GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC);
+ GenerateMiss(masm, argc);
}
void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ GenerateCallNormal(masm, argc);
+ GenerateMiss(masm, argc);
}
void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss);
}
void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ // Get the receiver of the function from the stack into a1.
+ __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+ Label do_call, slow_call, slow_load, slow_reload_receiver;
+ Label check_number_dictionary, check_string, lookup_monomorphic_cache;
+ Label index_smi, index_string;
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(a2, &check_string);
+ __ bind(&index_smi);
+ // Now the key is known to be a smi. This place is also jumped to from below
+ // where a numeric string is converted to a smi.
+
+ GenerateKeyedLoadReceiverCheck(
+ masm, a1, a0, a3, Map::kHasIndexedInterceptor, &slow_call);
+
+ GenerateFastArrayLoad(
+ masm, a1, a2, t0, a3, a0, a1, &check_number_dictionary, &slow_load);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1, a0, a3);
+
+ __ bind(&do_call);
+ // receiver in a1 is not used after this point.
+ // a2: key
+ // a1: function
+
+ GenerateFunctionTailCall(masm, argc, &slow_call, a0);
+
+ __ bind(&check_number_dictionary);
+ // a2: key
+ // a3: elements map
+ // t0: elements pointer
+ // Check whether the elements is a number dictionary.
+ __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+ __ Branch(&slow_load, ne, a3, Operand(at));
+ __ sra(a0, a2, kSmiTagSize);
+ // a0: untagged index
+ GenerateNumberDictionaryLoad(masm, &slow_load, t0, a2, a1, a0, a3, t1);
+ __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, a0, a3);
+ __ jmp(&do_call);
+
+ __ bind(&slow_load);
+ // This branch is taken when calling KeyedCallIC_Miss is neither required
+ // nor beneficial.
+ __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, a0, a3);
+ __ EnterInternalFrame();
+ __ push(a2); // Save the key.
+ __ Push(a1, a2); // Pass the receiver and the key.
+ __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+ __ pop(a2); // Restore the key.
+ __ LeaveInternalFrame();
+ __ mov(a1, v0);
+ __ jmp(&do_call);
+
+ __ bind(&check_string);
+ GenerateKeyStringCheck(masm, a2, a0, a3, &index_string, &slow_call);
+
+ // The key is known to be a symbol.
+ // If the receiver is a regular JS object with slow properties then do
+ // a quick inline probe of the receiver's dictionary.
+ // Otherwise do the monomorphic cache probe.
+ GenerateKeyedLoadReceiverCheck(
+ masm, a1, a0, a3, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
+
+ __ lw(a0, FieldMemOperand(a1, JSObject::kPropertiesOffset));
+ __ lw(a3, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+ __ Branch(&lookup_monomorphic_cache, ne, a3, Operand(at));
+
+ GenerateDictionaryLoad(masm, &slow_load, a0, a2, a1, a3, t0);
+ __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1, a0, a3);
+ __ jmp(&do_call);
+
+ __ bind(&lookup_monomorphic_cache);
+ __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1, a0, a3);
+ GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC);
+ // Fall through on miss.
+
+ __ bind(&slow_call);
+ // This branch is taken if:
+ // - the receiver requires boxing or access check,
+ // - the key is neither smi nor symbol,
+ // - the value loaded is not a function,
+ // - there is hope that the runtime will create a monomorphic call stub,
+ // that will get fetched next time.
+ __ IncrementCounter(counters->keyed_call_generic_slow(), 1, a0, a3);
+ GenerateMiss(masm, argc);
+
+ __ bind(&index_string);
+ __ IndexFromHash(a3, a2);
+ // Now jump to the place where smi keys are handled.
+ __ jmp(&index_smi);
}
void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ // Check if the name is a string.
+ Label miss;
+ __ JumpIfSmi(a2, &miss);
+ __ IsObjectJSStringType(a2, a0, &miss);
+
+ GenerateCallNormal(masm, argc);
+ __ bind(&miss);
+ GenerateMiss(masm, argc);
}
@@ -101,17 +814,63 @@
Object* LoadIC_Miss(Arguments args);
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+
+ // Probe the stub cache.
+ Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
+ NOT_IN_LOOP,
+ MONOMORPHIC);
+ Isolate::Current()->stub_cache()->GenerateProbe(
+ masm, flags, a0, a2, a3, t0, t1);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
}
void LoadIC::GenerateNormal(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ Label miss;
+
+ GenerateStringDictionaryReceiverCheck(masm, a0, a1, a3, t0, &miss);
+
+ // a1: elements
+ GenerateDictionaryLoad(masm, &miss, a1, a2, v0, a3, t0);
+ __ Ret();
+
+ // Cache miss: Jump to runtime.
+ __ bind(&miss);
+ GenerateMiss(masm);
}
void LoadIC::GenerateMiss(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ Isolate* isolate = masm->isolate();
+
+ __ IncrementCounter(isolate->counters()->load_miss(), 1, a3, t0);
+
+ __ mov(a3, a0);
+ __ Push(a3, a2);
+
+ // Perform tail call to the entry.
+ ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
+ __ TailCallExternalReference(ref, 2, 1);
}
@@ -119,71 +878,540 @@
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Isolate* isolate = masm->isolate();
+
+ __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);
+
+ __ Push(a1, a0);
+
+ ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss),
+ isolate);
+ __ TailCallExternalReference(ref, 2, 1);
}
void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+
+ __ Push(a1, a0);
+
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label slow, check_string, index_smi, index_string, property_array_property;
+ Label probe_dictionary, check_number_dictionary;
+
+ Register key = a0;
+ Register receiver = a1;
+
+ Isolate* isolate = masm->isolate();
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key, &check_string);
+ __ bind(&index_smi);
+ // Now the key is known to be a smi. This place is also jumped to from below
+ // where a numeric string is converted to a smi.
+
+ GenerateKeyedLoadReceiverCheck(
+ masm, receiver, a2, a3, Map::kHasIndexedInterceptor, &slow);
+
+ // Check the "has fast elements" bit in the receiver's map which is
+ // now in a2.
+ __ lbu(a3, FieldMemOperand(a2, Map::kBitField2Offset));
+ __ And(at, a3, Operand(1 << Map::kHasFastElements));
+ __ Branch(&check_number_dictionary, eq, at, Operand(zero_reg));
+
+ GenerateFastArrayLoad(
+ masm, receiver, key, t0, a3, a2, v0, NULL, &slow);
+
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, a2, a3);
+ __ Ret();
+
+ __ bind(&check_number_dictionary);
+ __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ lw(a3, FieldMemOperand(t0, JSObject::kMapOffset));
+
+ // Check whether the elements is a number dictionary.
+ // a0: key
+ // a3: elements map
+ // t0: elements
+ __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+ __ Branch(&slow, ne, a3, Operand(at));
+ __ sra(a2, a0, kSmiTagSize);
+ GenerateNumberDictionaryLoad(masm, &slow, t0, a0, v0, a2, a3, t1);
+ __ Ret();
+
+ // Slow case, key and receiver still in a0 and a1.
+ __ bind(&slow);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
+ 1,
+ a2,
+ a3);
+ GenerateRuntimeGetProperty(masm);
+
+ __ bind(&check_string);
+ GenerateKeyStringCheck(masm, key, a2, a3, &index_string, &slow);
+
+ GenerateKeyedLoadReceiverCheck(
+ masm, receiver, a2, a3, Map::kHasNamedInterceptor, &slow);
+
+
+ // If the receiver is a fast-case object, check the keyed lookup
+ // cache. Otherwise probe the dictionary.
+ __ lw(a3, FieldMemOperand(a1, JSObject::kPropertiesOffset));
+ __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+ __ Branch(&probe_dictionary, eq, t0, Operand(at));
+
+ // Load the map of the receiver, compute the keyed lookup cache hash
+ // based on 32 bits of the map pointer and the string hash.
+ __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ sra(a3, a2, KeyedLookupCache::kMapHashShift);
+ __ lw(t0, FieldMemOperand(a0, String::kHashFieldOffset));
+ __ sra(at, t0, String::kHashShift);
+ __ xor_(a3, a3, at);
+ __ And(a3, a3, Operand(KeyedLookupCache::kCapacityMask));
+
+ // Load the key (consisting of map and symbol) from the cache and
+ // check for match.
+ ExternalReference cache_keys =
+ ExternalReference::keyed_lookup_cache_keys(isolate);
+ __ li(t0, Operand(cache_keys));
+ __ sll(at, a3, kPointerSizeLog2 + 1);
+ __ addu(t0, t0, at);
+ __ lw(t1, MemOperand(t0)); // Load the cached map.
+ __ Addu(t0, t0, Operand(kPointerSize)); // Advance t0 to the symbol entry.
+ __ Branch(&slow, ne, a2, Operand(t1));
+ __ lw(t1, MemOperand(t0));
+ __ Branch(&slow, ne, a0, Operand(t1));
+
+ // Get field offset.
+ // a0 : key
+ // a1 : receiver
+ // a2 : receiver's map
+ // a3 : lookup cache index
+ ExternalReference cache_field_offsets =
+ ExternalReference::keyed_lookup_cache_field_offsets(isolate);
+ __ li(t0, Operand(cache_field_offsets));
+ __ sll(at, a3, kPointerSizeLog2);
+ __ addu(at, t0, at);
+ __ lw(t1, MemOperand(at));
+ __ lbu(t2, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
+ __ Subu(t1, t1, t2);
+ __ Branch(&property_array_property, ge, t1, Operand(zero_reg));
+
+ // Load in-object property.
+ __ lbu(t2, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+ __ addu(t2, t2, t1); // Index from start of object.
+ __ Subu(a1, a1, Operand(kHeapObjectTag)); // Remove the heap tag.
+ __ sll(at, t2, kPointerSizeLog2);
+ __ addu(at, a1, at);
+ __ lw(v0, MemOperand(at));
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
+ 1,
+ a2,
+ a3);
+ __ Ret();
+
+ // Load property array property.
+ __ bind(&property_array_property);
+ __ lw(a1, FieldMemOperand(a1, JSObject::kPropertiesOffset));
+ __ Addu(a1, a1, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ sll(t0, t1, kPointerSizeLog2);
+ __ Addu(t0, t0, a1);
+ __ lw(v0, MemOperand(t0));
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
+ 1,
+ a2,
+ a3);
+ __ Ret();
+
+
+ // Do a quick inline probe of the receiver's dictionary, if it
+ // exists.
+ __ bind(&probe_dictionary);
+ // a1: receiver
+ // a0: key
+ // a3: elements
+ __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(a2, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+ GenerateGlobalInstanceTypeCheck(masm, a2, &slow);
+ // Load the property to v0.
+ GenerateDictionaryLoad(masm, &slow, a3, a0, v0, a2, t0);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
+ 1,
+ a2,
+ a3);
+ __ Ret();
+
+ __ bind(&index_string);
+ __ IndexFromHash(a3, key);
+ // Now jump to the place where smi keys are handled.
+ __ Branch(&index_smi);
}
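// ---- Editor's aside (not part of the diff) --------------------------------
// Sketch of the KeyedLookupCache probe above: the index mixes the receiver's
// map word with the symbol's hash field; the keys table stores (map, symbol)
// pairs (hence the index * 2 words scaling) and the field-offsets table one
// word per index. The shift and mask values are parameters here because only
// their names appear in the code.
#include <stdint.h>

static uint32_t LookupCacheIndexSketch(uint32_t map_word, uint32_t hash_field,
                                       int map_hash_shift,
                                       int string_hash_shift,
                                       uint32_t capacity_mask) {
  return ((map_word >> map_hash_shift) ^ (hash_field >> string_hash_shift)) &
         capacity_mask;
}
// ----------------------------------------------------------------------------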
void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key (index)
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ Register receiver = a1;
+ Register index = a0;
+ Register scratch1 = a2;
+ Register scratch2 = a3;
+ Register result = v0;
+
+ StringCharAtGenerator char_at_generator(receiver,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &miss, // When index out of range.
+ STRING_INDEX_IS_ARRAY_INDEX);
+ char_at_generator.GenerateFast(masm);
+ __ Ret();
+
+ StubRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm, call_helper);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
}
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
StrictModeFlag strict_mode) {
- UNIMPLEMENTED_MIPS();
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ __ Push(a2, a1, a0);
+ __ li(a1, Operand(Smi::FromInt(NONE))); // PropertyAttributes.
+ __ li(a0, Operand(Smi::FromInt(strict_mode))); // Strict mode.
+ __ Push(a1, a0);
+
+ __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
StrictModeFlag strict_mode) {
- UNIMPLEMENTED_MIPS();
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -----------------------------------
+
+ Label slow, fast, array, extra, exit;
+
+ // Register usage.
+ Register value = a0;
+ Register key = a1;
+ Register receiver = a2;
+ Register elements = a3; // Elements array of the receiver.
+ // t0 is used as ip in the ARM version.
+ // t3-t4 are used as temporaries.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key, &slow);
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(receiver, &slow);
+
+ // Get the map of the object.
+ __ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ // Check that the receiver does not require access checks. We need
+ // to do this because this generic stub does not perform map checks.
+ __ lbu(t0, FieldMemOperand(t3, Map::kBitFieldOffset));
+ __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded));
+ __ Branch(&slow, ne, t0, Operand(zero_reg));
+ // Check if the object is a JS array or not.
+ __ lbu(t3, FieldMemOperand(t3, Map::kInstanceTypeOffset));
+
+ __ Branch(&array, eq, t3, Operand(JS_ARRAY_TYPE));
+ // Check that the object is some kind of JS object.
+ __ Branch(&slow, lt, t3, Operand(FIRST_JS_OBJECT_TYPE));
+
+ // Object case: Check key against length in the elements array.
+ __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ // Check that the object is in fast mode and writable.
+ __ lw(t3, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(t0, Heap::kFixedArrayMapRootIndex);
+ __ Branch(&slow, ne, t3, Operand(t0));
+ // Check array bounds. Both the key and the length of FixedArray are smis.
+ __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Branch(&fast, lo, key, Operand(t0));
+ // Fall through to slow if untagged index >= length.
+
+ // Slow case, handle jump to runtime.
+ __ bind(&slow);
+
+ // Entry registers are intact.
+ // a0: value.
+ // a1: key.
+ // a2: receiver.
+
+ GenerateRuntimeSetProperty(masm, strict_mode);
+
+ // Extra capacity case: Check if there is extra capacity to
+ // perform the store and update the length. Used for adding one
+ // element to the array by writing to array[array.length].
+
+ __ bind(&extra);
+ // Only support writing to array[array.length].
+ __ Branch(&slow, ne, key, Operand(t0));
+ // Check for room in the elements backing store.
+ // Both the key and the length of FixedArray are smis.
+ __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Branch(&slow, hs, key, Operand(t0));
+ // Calculate key + 1 as smi.
+ ASSERT_EQ(0, kSmiTag);
+ __ Addu(t3, key, Operand(Smi::FromInt(1)));
+ __ sw(t3, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Branch(&fast);
+
+
+ // Array case: Get the length and the elements array from the JS
+ // array. Check that the array is in fast mode (and writable); if it
+ // is the length is always a smi.
+
+ __ bind(&array);
+ __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ lw(t3, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(t0, Heap::kFixedArrayMapRootIndex);
+ __ Branch(&slow, ne, t3, Operand(t0));
+
+ // Check the key against the length in the array.
+ __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Branch(&extra, hs, key, Operand(t0));
+ // Fall through to fast case.
+
+ __ bind(&fast);
+ // Fast case, store the value to the elements backing store.
+ __ Addu(t4, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sll(t1, key, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t4, t4, Operand(t1));
+ __ sw(value, MemOperand(t4));
+ // Skip write barrier if the written value is a smi.
+ __ JumpIfSmi(value, &exit);
+
+ // Update write barrier for the elements array address.
+ __ Subu(t3, t4, Operand(elements));
+
+ __ RecordWrite(elements, Operand(t3), t4, t5);
+ __ bind(&exit);
+
+ __ mov(v0, a0); // Return the value written.
+ __ Ret();
}
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label slow;
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(a1, &slow);
+
+ // Check that the key is an array index, that is Uint32.
+ __ And(t0, a0, Operand(kSmiTagMask | kSmiSignMask));
+ __ Branch(&slow, ne, t0, Operand(zero_reg));
+
+ // Get the map of the receiver.
+ __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+
+ // Check that it has indexed interceptor and access checks
+ // are not enabled for this object.
+ __ lbu(a3, FieldMemOperand(a2, Map::kBitFieldOffset));
+ __ And(a3, a3, Operand(kSlowCaseBitFieldMask));
+ __ Branch(&slow, ne, a3, Operand(1 << Map::kHasIndexedInterceptor));
+ // Everything is fine, call runtime.
+ __ Push(a1, a0); // Receiver, key.
+
+ // Perform tail call to the entry.
+ __ TailCallExternalReference(ExternalReference(
+ IC_Utility(kKeyedLoadPropertyWithInterceptor), masm->isolate()), 2, 1);
+
+ __ bind(&slow);
+ GenerateMiss(masm);
}
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ // We can't use MultiPush as the order of the registers is important.
+ __ Push(a2, a1, a0);
+
+ ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss),
+ masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
StrictModeFlag strict_mode) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ // Get the receiver from the stack and probe the stub cache.
+ Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
+ NOT_IN_LOOP,
+ MONOMORPHIC,
+ strict_mode);
+ Isolate::Current()->stub_cache()->GenerateProbe(
+ masm, flags, a1, a2, a3, t0, t1);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
}
void StoreIC::GenerateMiss(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ __ Push(a1, a2, a0);
+ // Perform tail call to the entry.
+ ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss),
+ masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
}
void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ //
+ // This accepts as a receiver anything JSObject::SetElementsLength accepts
+ // (currently anything except external and pixel arrays, which means
+ // anything with elements of FixedArray type), but is currently restricted
+ // to JSArray.
+ // Value must be a number; only smis are accepted as the most common case.
+
+ Label miss;
+
+ Register receiver = a1;
+ Register value = a0;
+ Register scratch = a3;
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Check that the object is a JS array.
+ __ GetObjectType(receiver, scratch, scratch);
+ __ Branch(&miss, ne, scratch, Operand(JS_ARRAY_TYPE));
+
+ // Check that elements are FixedArray.
+ // We rely on StoreIC_ArrayLength below to deal with all types of
+ // fast elements (including COW).
+ __ lw(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
+ __ GetObjectType(scratch, scratch, scratch);
+ __ Branch(&miss, ne, scratch, Operand(FIXED_ARRAY_TYPE));
+
+ // Check that value is a smi.
+ __ JumpIfNotSmi(value, &miss);
+
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ Push(receiver, value);
+
+ ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength),
+ masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+
+ __ bind(&miss);
+
+ GenerateMiss(masm);
}
void StoreIC::GenerateNormal(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ GenerateStringDictionaryReceiverCheck(masm, a1, a3, t0, t1, &miss);
+
+ GenerateDictionaryStore(masm, &miss, a3, a2, a0, t0, t1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->store_normal_hit(), 1, t0, t1);
+ __ Ret();
+
+ __ bind(&miss);
+ __ IncrementCounter(counters->store_normal_miss(), 1, t0, t1);
+ GenerateMiss(masm);
}
void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
StrictModeFlag strict_mode) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ __ Push(a1, a2, a0);
+
+ __ li(a1, Operand(Smi::FromInt(NONE))); // PropertyAttributes.
+ __ li(a0, Operand(Smi::FromInt(strict_mode)));
+ __ Push(a1, a0);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}
@@ -191,18 +1419,119 @@
Condition CompareIC::ComputeCondition(Token::Value op) {
- UNIMPLEMENTED_MIPS();
- return kNoCondition;
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ return eq;
+ case Token::LT:
+ return lt;
+ case Token::GT:
+ // Reverse left and right operands to obtain ECMA-262 conversion order.
+ return lt;
+ case Token::LTE:
+ // Reverse left and right operands to obtain ECMA-262 conversion order.
+ return ge;
+ case Token::GTE:
+ return ge;
+ default:
+ UNREACHABLE();
+ return kNoCondition;
+ }
}
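// ---- Editor's aside (not part of the diff) --------------------------------
// What the switch above encodes, spelled out. Reversing the operands for GT
// and LTE lets every relational compare run on just lt/ge while preserving
// the left-to-right conversion order that ECMA-262 requires:
//   a == b  ->  compare(a, b), eq
//   a <  b  ->  compare(a, b), lt
//   a >  b  ->  compare(b, a), lt   (operands reversed)
//   a <= b  ->  compare(b, a), ge   (operands reversed)
//   a >= b  ->  compare(a, b), ge
// ----------------------------------------------------------------------------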
void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
- UNIMPLEMENTED_MIPS();
+ HandleScope scope;
+ Handle<Code> rewritten;
+ State previous_state = GetState();
+ State state = TargetState(previous_state, false, x, y);
+ if (state == GENERIC) {
+ CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
+ rewritten = stub.GetCode();
+ } else {
+ ICCompareStub stub(op_, state);
+ rewritten = stub.GetCode();
+ }
+ set_target(*rewritten);
+
+#ifdef DEBUG
+ if (FLAG_trace_ic) {
+ PrintF("[CompareIC (%s->%s)#%s]\n",
+ GetStateName(previous_state),
+ GetStateName(state),
+ Token::Name(op_));
+ }
+#endif
+
+ // Activate inlined smi code.
+ if (previous_state == UNINITIALIZED) {
+ PatchInlinedSmiCode(address());
+ }
}
void PatchInlinedSmiCode(Address address) {
- // Currently there is no smi inlining in the MIPS full code generator.
+ Address andi_instruction_address =
+ address + Assembler::kCallTargetAddressOffset;
+
+ // If the instruction following the call is not an andi at, rx, #yyy, nothing
+ // was inlined.
+ Instr instr = Assembler::instr_at(andi_instruction_address);
+ if (!Assembler::IsAndImmediate(instr)) {
+ return;
+ }
+
+ // The delta to the start of the map check instruction and the
+ // condition code used at the patched jump.
+ int delta = Assembler::GetImmediate16(instr);
+ delta += Assembler::GetRs(instr) * kImm16Mask;
+ // If the delta is 0 the instruction is andi at, zero_reg, #0 which also
+ // signals that nothing was inlined.
+ if (delta == 0) {
+ return;
+ }
+
+#ifdef DEBUG
+ if (FLAG_trace_ic) {
+ PrintF("[ patching ic at %p, andi=%p, delta=%d\n",
+ address, andi_instruction_address, delta);
+ }
+#endif
+
+ Address patch_address =
+ andi_instruction_address - delta * Instruction::kInstrSize;
+ Instr instr_at_patch = Assembler::instr_at(patch_address);
+ Instr branch_instr =
+ Assembler::instr_at(patch_address + Instruction::kInstrSize);
+ ASSERT(Assembler::IsAndImmediate(instr_at_patch));
+ ASSERT_EQ(0, Assembler::GetImmediate16(instr_at_patch));
+ ASSERT(Assembler::IsBranch(branch_instr));
+ if (Assembler::IsBeq(branch_instr)) {
+ // This is patching a "jump if not smi" site to be active.
+ // Changing:
+ // andi at, rx, 0
+ // Branch <target>, eq, at, Operand(zero_reg)
+ // to:
+ // andi at, rx, #kSmiTagMask
+ // Branch <target>, ne, at, Operand(zero_reg)
+ CodePatcher patcher(patch_address, 2);
+ Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
+ patcher.masm()->andi(at, reg, kSmiTagMask);
+ patcher.ChangeBranchCondition(ne);
+ } else {
+ ASSERT(Assembler::IsBne(branch_instr));
+ // This is patching a "jump if smi" site to be active.
+ // Changing:
+ // andi at, rx, 0
+ // Branch <target>, ne, at, Operand(zero_reg)
+ // to:
+ // andi at, rx, #kSmiTagMask
+ // Branch <target>, eq, at, Operand(zero_reg)
+ CodePatcher patcher(patch_address, 2);
+ Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
+ patcher.masm()->andi(at, reg, kSmiTagMask);
+ patcher.ChangeBranchCondition(eq);
+ }
}
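// ---- Editor's aside (not part of the diff) --------------------------------
// The patch-site address recovery above as a C++ sketch, assuming
// kImm16Mask == 0xffff and Instruction::kInstrSize == 4 (the usual MIPS32
// definitions); illustrative only.
#include <stdint.h>

static uint8_t* PatchSiteFromAndiSketch(uint8_t* andi_address,
                                        uint32_t imm16, uint32_t rs_code) {
  uint32_t delta = imm16 + rs_code * 0xffff;  // GetImmediate16 + GetRs * mask.
  return andi_address - delta * 4;            // delta counts instructions.
}
// ----------------------------------------------------------------------------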
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index e95335d..8591698 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -1992,6 +1992,159 @@
}
+void MacroAssembler::Throw(Register value) {
+ // v0 is expected to hold the exception.
+ Move(v0, value);
+
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+ // Drop the sp to the top of the handler.
+ li(a3, Operand(ExternalReference(Isolate::k_handler_address,
+ isolate())));
+ lw(sp, MemOperand(a3));
+
+ // Restore the next handler and frame pointer, discard handler state.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ pop(a2);
+ sw(a2, MemOperand(a3));
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+ MultiPop(a3.bit() | fp.bit());
+
+ // Before returning we restore the context from the frame pointer if
+ // not NULL. The frame pointer is NULL in the exception handler of a
+ // JS entry frame.
+ // Set cp to NULL if fp is NULL.
+ Label done;
+ Branch(USE_DELAY_SLOT, &done, eq, fp, Operand(zero_reg));
+ mov(cp, zero_reg); // In branch delay slot.
+ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ bind(&done);
+
+#ifdef DEBUG
+ // When emitting debug_code, set ra as return address for the jump.
+ // 5 instructions: add: 1, pop: 2, jump: 2.
+ const int kOffsetRaInstructions = 5;
+ Label find_ra;
+
+ if (emit_debug_code()) {
+ // Compute ra for the Jump(t9).
+ const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;
+
+ // This branch-and-link sequence is needed to get the current PC on mips,
+ // saved to the ra register. Then adjusted for instruction count.
+ bal(&find_ra); // bal exposes branch-delay slot.
+ nop(); // Branch delay slot nop.
+ bind(&find_ra);
+ addiu(ra, ra, kOffsetRaBytes);
+ }
+#endif
+
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+ pop(t9); // 2 instructions: lw, add sp.
+ Jump(t9); // 2 instructions: jr, nop (in delay slot).
+
+ if (emit_debug_code()) {
+ // Make sure that the expected number of instructions were generated.
+ ASSERT_EQ(kOffsetRaInstructions,
+ InstructionsGeneratedSince(&find_ra));
+ }
+}
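// ---- Editor's aside (not part of the diff) --------------------------------
// The handler record layout implied by the STATIC_ASSERTs above (four words;
// the state slot at offset 1 * kPointerSize is inferred from ThrowUncatchable
// below):
#include <stdint.h>

struct StackHandlerSketch {
  uint32_t next;   // sp + 0   (kNextOffset)
  uint32_t state;  // sp + 4   (kStateOffset, inferred)
  uint32_t fp;     // sp + 8   (kFPOffset)
  uint32_t pc;     // sp + 12  (kPCOffset)
};
// ----------------------------------------------------------------------------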
+
+
+void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
+ Register value) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+ // v0 is expected to hold the exception.
+ Move(v0, value);
+
+ // Drop sp to the top stack handler.
+ li(a3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
+ lw(sp, MemOperand(a3));
+
+ // Unwind the handlers until the ENTRY handler is found.
+ Label loop, done;
+ bind(&loop);
+ // Load the type of the current stack handler.
+ const int kStateOffset = StackHandlerConstants::kStateOffset;
+ lw(a2, MemOperand(sp, kStateOffset));
+ Branch(&done, eq, a2, Operand(StackHandler::ENTRY));
+ // Fetch the next handler in the list.
+ const int kNextOffset = StackHandlerConstants::kNextOffset;
+ lw(sp, MemOperand(sp, kNextOffset));
+ jmp(&loop);
+ bind(&done);
+
+ // Set the top handler address to the next handler past the current
+ // ENTRY handler.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ pop(a2);
+ sw(a2, MemOperand(a3));
+
+ if (type == OUT_OF_MEMORY) {
+ // Set external caught exception to false.
+ ExternalReference external_caught(
+ Isolate::k_external_caught_exception_address, isolate());
+ li(a0, Operand(false, RelocInfo::NONE));
+ li(a2, Operand(external_caught));
+ sw(a0, MemOperand(a2));
+
+ // Set pending exception and v0 to out of memory exception.
+ Failure* out_of_memory = Failure::OutOfMemoryException();
+ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+ li(a2, Operand(ExternalReference(Isolate::k_pending_exception_address,
+ isolate())));
+ sw(v0, MemOperand(a2));
+ }
+
+ // Stack layout at this point. See also StackHandlerConstants.
+ // sp -> state (ENTRY)
+ // fp
+ // ra
+
+ // Discard handler state (a2 is not used) and restore frame pointer.
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+ MultiPop(a2.bit() | fp.bit()); // a2: discarded state.
+ // Before returning we restore the context from the frame pointer if
+ // not NULL. The frame pointer is NULL in the exception handler of a
+ // JS entry frame.
+ Label cp_null;
+ Branch(USE_DELAY_SLOT, &cp_null, eq, fp, Operand(zero_reg));
+ mov(cp, zero_reg); // In the branch delay slot.
+ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ bind(&cp_null);
+
+#ifdef DEBUG
+ // When emitting debug_code, set ra as return address for the jump.
+ // 5 instructions: add: 1, pop: 2, jump: 2.
+ const int kOffsetRaInstructions = 5;
+ Label find_ra;
+
+ if (emit_debug_code()) {
+ // Compute ra for the Jump(t9).
+ const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;
+
+ // This branch-and-link sequence is needed to get the current PC on mips,
+ // saved to the ra register. Then adjusted for instruction count.
+ bal(&find_ra); // bal exposes branch-delay slot.
+ nop(); // Branch delay slot nop.
+ bind(&find_ra);
+ addiu(ra, ra, kOffsetRaBytes);
+ }
+#endif
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+ pop(t9); // 2 instructions: lw, add sp.
+ Jump(t9); // 2 instructions: jr, nop (in delay slot).
+
+ if (emit_debug_code()) {
+ // Make sure that the expected number of instructions were generated.
+ ASSERT_EQ(kOffsetRaInstructions,
+ InstructionsGeneratedSince(&find_ra));
+ }
+}
+
+
void MacroAssembler::AllocateInNewSpace(int object_size,
Register result,
Register scratch1,
@@ -2351,7 +2504,7 @@
// Copy bytes in word size chunks.
bind(&word_loop);
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
And(scratch, src, kPointerSize - 1);
Assert(eq, "Expecting alignment for CopyBytes",
scratch, Operand(zero_reg));
@@ -2680,11 +2833,136 @@
}
+MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond,
+ Register r1, const Operand& r2) {
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
+ Object* result;
+ { MaybeObject* maybe_result = stub->TryGetCode();
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2);
+ return result;
+}
+
+
void MacroAssembler::TailCallStub(CodeStub* stub) {
ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
+MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub,
+ Condition cond,
+ Register r1,
+ const Operand& r2) {
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
+ Object* result;
+ { MaybeObject* maybe_result = stub->TryGetCode();
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2);
+ return result;
+}
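// ---- Editor's aside (not part of the diff) --------------------------------
// Hypothetical call-site pattern for the Try* variants above: they return a
// Failure instead of triggering a GC, so a caller must check and propagate
// it, as TryCallApiFunctionAndReturn does further down:
//
//   MaybeObject* result = masm->TryTailCallStub(&stub);
//   if (result->IsFailure()) return result;  // Retry-after-GC upstream.
// ----------------------------------------------------------------------------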
+
+
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ return ref0.address() - ref1.address();
+}
+
+
+MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
+ ExternalReference function, int stack_space) {
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address();
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(),
+ next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(),
+ next_address);
+
+ // Allocate HandleScope in callee-save registers.
+ li(s3, Operand(next_address));
+ lw(s0, MemOperand(s3, kNextOffset));
+ lw(s1, MemOperand(s3, kLimitOffset));
+ lw(s2, MemOperand(s3, kLevelOffset));
+ Addu(s2, s2, Operand(1));
+ sw(s2, MemOperand(s3, kLevelOffset));
+
+ // The O32 ABI requires us to pass a pointer in a0 where the returned struct
+ // (4 bytes) will be placed. This is also built into the Simulator.
+ // Set up the pointer to the returned value (a0). It was allocated in
+ // EnterExitFrame.
+ addiu(a0, fp, ExitFrameConstants::kStackSpaceOffset);
+
+ // Native call returns to the DirectCEntry stub which redirects to the
+ // return address pushed on stack (could have moved after GC).
+ // DirectCEntry stub itself is generated early and never moves.
+ DirectCEntryStub stub;
+ stub.GenerateCall(this, function);
+
+ // As mentioned above, on MIPS a pointer is returned - we need to dereference
+ // it to get the actual return value (which is also a pointer).
+ lw(v0, MemOperand(v0));
+
+ Label promote_scheduled_exception;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+
+ // If result is non-zero, dereference to get the result value
+ // otherwise set it to undefined.
+ Label skip;
+ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+ Branch(&skip, eq, v0, Operand(zero_reg));
+ lw(a0, MemOperand(v0));
+ bind(&skip);
+ mov(v0, a0);
+
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ sw(s0, MemOperand(s3, kNextOffset));
+ if (emit_debug_code()) {
+ lw(a1, MemOperand(s3, kLevelOffset));
+ Check(eq, "Unexpected level after return from api call", a1, Operand(s2));
+ }
+ Subu(s2, s2, Operand(1));
+ sw(s2, MemOperand(s3, kLevelOffset));
+ lw(at, MemOperand(s3, kLimitOffset));
+ Branch(&delete_allocated_handles, ne, s1, Operand(at));
+
+ // Check if the function scheduled an exception.
+ bind(&leave_exit_frame);
+ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+ li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
+ lw(t1, MemOperand(at));
+ Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
+ li(s0, Operand(stack_space));
+ LeaveExitFrame(false, s0);
+ Ret();
+
+ bind(&promote_scheduled_exception);
+ MaybeObject* result = TryTailCallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0, 1);
+ if (result->IsFailure()) {
+ return result;
+ }
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ bind(&delete_allocated_handles);
+ sw(s1, MemOperand(s3, kLimitOffset));
+ mov(s0, v0);
+ mov(a0, v0);
+ PrepareCallCFunction(1, s1);
+ li(a0, Operand(ExternalReference::isolate_address()));
+ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
+ 1);
+ mov(v0, s0);
+ jmp(&leave_exit_frame);
+
+ return result;
+}
+
void MacroAssembler::IllegalOperation(int num_arguments) {
if (num_arguments > 0) {
@@ -2893,6 +3171,16 @@
JumpToExternalReference(ext);
}
+MaybeObject* MacroAssembler::TryTailCallExternalReference(
+ const ExternalReference& ext, int num_arguments, int result_size) {
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ li(a0, num_arguments);
+ return TryJumpToExternalReference(ext);
+}
+
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
@@ -2910,6 +3198,14 @@
}
+MaybeObject* MacroAssembler::TryJumpToExternalReference(
+ const ExternalReference& builtin) {
+ li(a1, Operand(builtin));
+ CEntryStub stub(1);
+ return TryTailCallStub(&stub);
+}
+
+
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
@@ -3144,33 +3440,34 @@
}
-void MacroAssembler::EnterExitFrame(Register hold_argc,
- Register hold_argv,
- Register hold_function,
- bool save_doubles) {
- // a0 is argc.
- sll(t8, a0, kPointerSizeLog2);
- addu(hold_argv, sp, t8);
- addiu(hold_argv, hold_argv, -kPointerSize);
+void MacroAssembler::EnterExitFrame(bool save_doubles,
+ int stack_space) {
+ // Setup the frame structure on the stack.
+ STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
+ STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
+ STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
- // Compute callee's stack pointer before making changes and save it as
- // t9 register so that it is restored as sp register on exit, thereby
- // popping the args.
- // t9 = sp + kPointerSize * #args
- addu(t9, sp, t8);
-
- // Align the stack at this point.
- AlignStack(0);
+ // This is how the stack will look:
+ // fp + 2 (==kCallerSPDisplacement) - old stack's end
+ // [fp + 1 (==kCallerPCOffset)] - saved old ra
+ // [fp + 0 (==kCallerFPOffset)] - saved old fp
+ // [fp - 1 (==kSPOffset)] - sp of the called function
+ // [fp - 2 (==kCodeOffset)] - CodeObject
+ // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
+ // new stack (will contain saved ra)
// Save registers.
- addiu(sp, sp, -12);
- sw(t9, MemOperand(sp, 8));
- sw(ra, MemOperand(sp, 4));
- sw(fp, MemOperand(sp, 0));
- mov(fp, sp); // Setup new frame pointer.
+ addiu(sp, sp, -4 * kPointerSize);
+ sw(ra, MemOperand(sp, 3 * kPointerSize));
+ sw(fp, MemOperand(sp, 2 * kPointerSize));
+ addiu(fp, sp, 2 * kPointerSize); // Setup new frame pointer.
- li(t8, Operand(CodeObject()));
- push(t8); // Accessed from ExitFrame::code_slot.
+ if (emit_debug_code()) {
+ sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
+ }
+
+ li(t8, Operand(CodeObject())); // Accessed from ExitFrame::code_slot.
+ sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
// Save the frame pointer and the context in top.
li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
@@ -3178,49 +3475,31 @@
li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate())));
sw(cp, MemOperand(t8));
- // Setup argc and the builtin function in callee-saved registers.
- mov(hold_argc, a0);
- mov(hold_function, a1);
+ // Ensure we are not saving doubles, since it's not implemented yet.
+ ASSERT(save_doubles == 0);
- // Optionally save all double registers.
- if (save_doubles) {
-#ifdef DEBUG
- int frame_alignment = ActivationFrameAlignment();
-#endif
- // The stack alignment code above made sp unaligned, so add space for one
- // more double register and use aligned addresses.
- ASSERT(kDoubleSize == frame_alignment);
- // Mark the frame as containing doubles by pushing a non-valid return
- // address, i.e. 0.
- ASSERT(ExitFrameConstants::kMarkerOffset == -2 * kPointerSize);
- push(zero_reg); // Marker and alignment word.
- int space = FPURegister::kNumRegisters * kDoubleSize + kPointerSize;
- Subu(sp, sp, Operand(space));
- // Remember: we only need to save every 2nd double FPU value.
- for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
- FPURegister reg = FPURegister::from_code(i);
- sdc1(reg, MemOperand(sp, i * kDoubleSize + kPointerSize));
- }
- // Note that f0 will be accessible at fp - 2*kPointerSize -
- // FPURegister::kNumRegisters * kDoubleSize, since the code slot and the
- // alignment word were pushed after the fp.
+ // Reserve space for the return address, the requested stack space and an
+ // optional slot (used by the DirectCEntryStub to hold the return value if
+ // a struct is returned), and align the frame before calling the runtime
+ // function.
+ ASSERT(stack_space >= 0);
+ const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
+ Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
+ if (frame_alignment > 0) {
+ ASSERT(IsPowerOf2(frame_alignment));
+ And(sp, sp, Operand(-frame_alignment)); // Align stack.
}
+
+ // Set the exit frame sp value to point just before the return address
+ // location.
+ addiu(at, sp, kPointerSize);
+ sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
-void MacroAssembler::LeaveExitFrame(bool save_doubles) {
- // Optionally restore all double registers.
- if (save_doubles) {
- // TODO(regis): Use vldrm instruction.
- // Remember: we only need to restore every 2nd double FPU value.
- for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
- FPURegister reg = FPURegister::from_code(i);
- // Register f30-f31 is just below the marker.
- const int offset = ExitFrameConstants::kMarkerOffset;
- ldc1(reg, MemOperand(fp,
- (i - FPURegister::kNumRegisters) * kDoubleSize + offset));
- }
- }
+void MacroAssembler::LeaveExitFrame(bool save_doubles,
+ Register argument_count) {
+ // Ensure we are not restoring doubles, since it's not implemented yet.
+ ASSERT(save_doubles == 0);
// Clear top frame.
li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
@@ -3235,11 +3514,13 @@
// Pop the arguments, restore registers, and return.
mov(sp, fp); // Respect ABI stack constraint.
- lw(fp, MemOperand(sp, 0));
- lw(ra, MemOperand(sp, 4));
- lw(sp, MemOperand(sp, 8));
- jr(ra);
- nop(); // Branch delay slot nop.
+ lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
+ lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
+ addiu(sp, sp, 8);
+ if (argument_count.is_valid()) {
+ sll(t8, argument_count, kPointerSizeLog2);
+ addu(sp, sp, t8);
+ }
}
@@ -3273,39 +3554,24 @@
#endif // defined(V8_HOST_ARCH_MIPS)
}
+void MacroAssembler::AssertStackIsAligned() {
+ if (emit_debug_code()) {
+ const int frame_alignment = ActivationFrameAlignment();
+ const int frame_alignment_mask = frame_alignment - 1;
-void MacroAssembler::AlignStack(int offset) {
- // On MIPS an offset of 0 aligns to 0 modulo 8 bytes,
- // and an offset of 1 aligns to 4 modulo 8 bytes.
-#if defined(V8_HOST_ARCH_MIPS)
- // Running on the real platform. Use the alignment as mandated by the local
- // environment.
- // Note: This will break if we ever start generating snapshots on one MIPS
- // platform for another MIPS platform with a different alignment.
- int activation_frame_alignment = OS::ActivationFrameAlignment();
-#else // defined(V8_HOST_ARCH_MIPS)
- // If we are using the simulator then we should always align to the expected
- // alignment. As the simulator is used to generate snapshots we do not know
- // if the target platform will need alignment, so we will always align at
- // this point here.
- int activation_frame_alignment = 2 * kPointerSize;
-#endif // defined(V8_HOST_ARCH_MIPS)
- if (activation_frame_alignment != kPointerSize) {
- // This code needs to be made more general if this assert doesn't hold.
- ASSERT(activation_frame_alignment == 2 * kPointerSize);
- if (offset == 0) {
- andi(t8, sp, activation_frame_alignment - 1);
- Push(zero_reg, eq, t8, zero_reg);
- } else {
- andi(t8, sp, activation_frame_alignment - 1);
- addiu(t8, t8, -4);
- Push(zero_reg, eq, t8, zero_reg);
+ if (frame_alignment > kPointerSize) {
+ Label alignment_as_expected;
+ ASSERT(IsPowerOf2(frame_alignment));
+ andi(at, sp, frame_alignment_mask);
+ Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
+ // Don't use Check here, as it will call Runtime_Abort re-entering here.
+ stop("Unexpected stack alignment");
+ bind(&alignment_as_expected);
+ }
}
- }
}
-
void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
Register reg,
Register scratch,
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index 0b89951..ea49dfa 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -581,23 +581,21 @@
void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
// Enter exit frame.
- // Expects the number of arguments in register a0 and
- // the builtin function to call in register a1.
- // On output hold_argc, hold_function, and hold_argv are setup.
- void EnterExitFrame(Register hold_argc,
- Register hold_argv,
- Register hold_function,
- bool save_doubles);
+ // argc - argument count to be dropped by LeaveExitFrame.
+ // save_doubles - saves FPU registers on stack, currently disabled.
+ // stack_space - extra stack space.
+ void EnterExitFrame(bool save_doubles,
+ int stack_space = 0);
- // Leave the current exit frame. Expects the return value in v0.
- void LeaveExitFrame(bool save_doubles);
-
- // Align the stack by optionally pushing a Smi zero.
- void AlignStack(int offset); // TODO(mips) : remove this function.
+ // Leave the current exit frame.
+ void LeaveExitFrame(bool save_doubles, Register arg_count);
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
+ // Make sure the stack is aligned. Only emits code in debug mode.
+ void AssertStackIsAligned();
+
void LoadContext(Register dst, int context_chain_length);
void LoadGlobalFunction(int index, Register function);
@@ -669,6 +667,13 @@
// Must preserve the result register.
void PopTryHandler();
+ // Passes thrown value (in v0) to the handler at the top of the try handler chain.
+ void Throw(Register value);
+
+ // Propagates an uncatchable exception to the top of the current JS stack's
+ // handler chain.
+ void ThrowUncatchable(UncatchableExceptionType type, Register value);
+
// Copies a fixed number of fields of heap objects from src to dst.
void CopyFields(Register dst, Register src, RegList temps, int field_count);
@@ -790,9 +795,27 @@
void CallStub(CodeStub* stub, Condition cond = cc_always,
Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+ // Call a code stub and return the code object called. Try to generate
+ // the code if necessary. Do not perform a GC but instead return a retry
+ // after GC failure.
+ MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub,
+ Condition cond = cc_always,
+ Register r1 = zero_reg,
+ const Operand& r2 =
+ Operand(zero_reg));
+
// Tail call a code stub (jump).
void TailCallStub(CodeStub* stub);
+ // Tail call a code stub (jump) and return the code object called. Try to
+ // generate the code if necessary. Do not perform a GC but instead return
+ // a retry after GC failure.
+ MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub,
+ Condition cond = cc_always,
+ Register r1 = zero_reg,
+ const Operand& r2 =
+ Operand(zero_reg));
+
void CallJSExitStub(CodeStub* stub);
// Call a runtime routine.
@@ -813,6 +836,12 @@
int num_arguments,
int result_size);
+ // Tail call of a runtime routine (jump). Try to generate the code if
+ // necessary. Do not perform a GC but instead return a retry after GC
+ // failure.
+ MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
+ const ExternalReference& ext, int num_arguments, int result_size);
+
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
@@ -840,12 +869,18 @@
// function).
void CallCFunction(ExternalReference function, int num_arguments);
void CallCFunction(Register function, Register scratch, int num_arguments);
-
void GetCFunctionDoubleResult(const DoubleRegister dst);
+ // Calls an API function. Allocates HandleScope, extracts returned value
+ // from handle and propagates exceptions. Restores context.
+ MaybeObject* TryCallApiFunctionAndReturn(ExternalReference function,
+ int stack_space);
+
// Jump to the builtin routine.
void JumpToExternalReference(const ExternalReference& builtin);
+ MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
+
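
Editor's note: the Try* variants above return a MaybeObject so that a failed allocation surfaces to the caller as a retry-after-GC failure rather than triggering a GC mid-call. A minimal sketch of that value-or-failure convention, with stand-in types rather than V8's real MaybeObject:

    #include <cstdio>

    // Illustrative stand-ins: a call either yields a value or a failure
    // the caller must check before using the result.
    struct Result {
      bool is_failure;
      int value;  // placeholder for a code object pointer
    };

    static Result TryAllocate(bool heap_full) {
      if (heap_full) return Result{true, 0};  // signal: retry after GC
      return Result{false, 42};               // success
    }

    int main() {
      Result r = TryAllocate(true);
      if (r.is_failure) {
        std::printf("allocation failed; caller retries after GC\n");
      }
      return 0;
    }
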
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id,
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index a0ce605..ba78253 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -155,6 +155,9 @@
break;
case FILLER_TYPE:
break;
+ case JS_PROXY_TYPE:
+ JSProxy::cast(this)->JSProxyVerify();
+ break;
case PROXY_TYPE:
Proxy::cast(this)->ProxyVerify();
break;
@@ -461,6 +464,11 @@
}
+void JSProxy::JSProxyVerify() {
+ ASSERT(IsJSProxy());
+ VerifyPointer(handler());
+}
+
void Proxy::ProxyVerify() {
ASSERT(IsProxy());
}
diff --git a/src/objects-inl.h b/src/objects-inl.h
index ebf8465..c96bf56 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -584,6 +584,12 @@
}
+bool Object::IsJSProxy() {
+ return Object::IsHeapObject()
+ && HeapObject::cast(this)->map()->instance_type() == JS_PROXY_TYPE;
+}
+
+
bool Object::IsProxy() {
return Object::IsHeapObject()
&& HeapObject::cast(this)->map()->instance_type() == PROXY_TYPE;
@@ -1898,6 +1904,7 @@
CAST_ACCESSOR(Code)
CAST_ACCESSOR(JSArray)
CAST_ACCESSOR(JSRegExp)
+CAST_ACCESSOR(JSProxy)
CAST_ACCESSOR(Proxy)
CAST_ACCESSOR(ByteArray)
CAST_ACCESSOR(ExternalArray)
@@ -3521,6 +3528,9 @@
}
+ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
+
+
Address Proxy::proxy() {
return AddressFrom<Address>(READ_INTPTR_FIELD(this, kProxyOffset));
}
@@ -3560,6 +3570,8 @@
INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
+ACCESSORS(Code, next_code_flushing_candidate,
+ Object, kNextCodeFlushingCandidateOffset)
byte* Code::instruction_start() {
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 5b1f5a7..7a584ba 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -148,6 +148,9 @@
case CODE_TYPE:
Code::cast(this)->CodePrint(out);
break;
+ case JS_PROXY_TYPE:
+ JSProxy::cast(this)->JSProxyPrint(out);
+ break;
case PROXY_TYPE:
Proxy::cast(this)->ProxyPrint(out);
break;
@@ -408,6 +411,7 @@
case JS_FUNCTION_TYPE: return "JS_FUNCTION";
case CODE_TYPE: return "CODE";
case JS_ARRAY_TYPE: return "JS_ARRAY";
+ case JS_PROXY_TYPE: return "JS_PROXY";
case JS_REGEXP_TYPE: return "JS_REGEXP";
case JS_VALUE_TYPE: return "JS_VALUE";
case JS_GLOBAL_OBJECT_TYPE: return "JS_GLOBAL_OBJECT";
@@ -530,6 +534,15 @@
}
+void JSProxy::JSProxyPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "JSProxy");
+ PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
+ PrintF(out, " - handler = ");
+ handler()->Print(out);
+ PrintF(out, "\n");
+}
+
+
void JSFunction::JSFunctionPrint(FILE* out) {
HeapObject::PrintHeader(out, "Function");
PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
diff --git a/src/objects-visiting.cc b/src/objects-visiting.cc
index bcb48ea..eab864b 100644
--- a/src/objects-visiting.cc
+++ b/src/objects-visiting.cc
@@ -88,6 +88,11 @@
case SHARED_FUNCTION_INFO_TYPE:
return kVisitSharedFunctionInfo;
+ case JS_PROXY_TYPE:
+ return GetVisitorIdForSize(kVisitDataObject,
+ kVisitDataObjectGeneric,
+ JSProxy::kSize);
+
case PROXY_TYPE:
return GetVisitorIdForSize(kVisitDataObject,
kVisitDataObjectGeneric,
diff --git a/src/objects.cc b/src/objects.cc
index 724a734..1821f50 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -134,24 +134,22 @@
void Object::Lookup(String* name, LookupResult* result) {
Object* holder = NULL;
if (IsSmi()) {
- Heap* heap = Isolate::Current()->heap();
- Context* global_context = heap->isolate()->context()->global_context();
+ Context* global_context = Isolate::Current()->context()->global_context();
holder = global_context->number_function()->instance_prototype();
} else {
HeapObject* heap_object = HeapObject::cast(this);
if (heap_object->IsJSObject()) {
return JSObject::cast(this)->Lookup(name, result);
}
- Heap* heap = heap_object->GetHeap();
+ Context* global_context = Isolate::Current()->context()->global_context();
if (heap_object->IsString()) {
- Context* global_context = heap->isolate()->context()->global_context();
holder = global_context->string_function()->instance_prototype();
} else if (heap_object->IsHeapNumber()) {
- Context* global_context = heap->isolate()->context()->global_context();
holder = global_context->number_function()->instance_prototype();
} else if (heap_object->IsBoolean()) {
- Context* global_context = heap->isolate()->context()->global_context();
holder = global_context->boolean_function()->instance_prototype();
+ } else if (heap_object->IsJSProxy()) {
+ return result->NotFound(); // For now...
}
}
ASSERT(holder != NULL); // Cannot handle null or undefined.
@@ -494,12 +492,13 @@
Heap* heap = name->GetHeap();
// Traverse the prototype chain from the current object (this) to
- // the holder and check for access rights. This avoid traversing the
+ // the holder and check for access rights. This avoids traversing the
// objects more than once in case of interceptors, because the
// holder will always be the interceptor holder and the search may
// only continue with a current object just after the interceptor
// holder in the prototype chain.
Object* last = result->IsProperty() ? result->holder() : heap->null_value();
+ ASSERT(this != this->GetPrototype());
for (Object* current = this; true; current = current->GetPrototype()) {
if (current->IsAccessCheckNeeded()) {
// Check if we're allowed to read from the current object. Note
@@ -575,6 +574,8 @@
holder = global_context->number_function()->instance_prototype();
} else if (heap_object->IsBoolean()) {
holder = global_context->boolean_function()->instance_prototype();
+ } else if (heap_object->IsJSProxy()) {
+ return heap->undefined_value(); // For now...
} else {
// Undefined and null have no indexed properties.
ASSERT(heap_object->IsUndefined() || heap_object->IsNull());
@@ -595,9 +596,10 @@
HeapObject* heap_object = HeapObject::cast(this);
- // The object is either a number, a string, a boolean, or a real JS object.
- if (heap_object->IsJSObject()) {
- return JSObject::cast(this)->map()->prototype();
+ // The object is either a number, a string, a boolean,
+ // a real JS object, or a Harmony proxy.
+ if (heap_object->IsJSObject() || heap_object->IsJSProxy()) {
+ return heap_object->map()->prototype();
}
Heap* heap = heap_object->GetHeap();
Context* context = heap->isolate()->context()->global_context();
@@ -1154,6 +1156,9 @@
case ODDBALL_TYPE:
Oddball::BodyDescriptor::IterateBody(this, v);
break;
+ case JS_PROXY_TYPE:
+ JSProxy::BodyDescriptor::IterateBody(this, v);
+ break;
case PROXY_TYPE:
reinterpret_cast<Proxy*>(this)->ProxyIterateBody(v);
break;
@@ -2868,8 +2873,9 @@
// exception. dictionary->DeleteProperty will return false_value()
// if a non-configurable property is being deleted.
HandleScope scope;
+ Handle<Object> self(this);
Handle<Object> i = isolate->factory()->NewNumberFromUint(index);
- Handle<Object> args[2] = { i, Handle<Object>(this) };
+ Handle<Object> args[2] = { i, self };
return isolate->Throw(*isolate->factory()->NewTypeError(
"strict_delete_property", HandleVector(args, 2)));
}
@@ -7287,7 +7293,7 @@
// api style callbacks.
if (structure->IsAccessorInfo()) {
- AccessorInfo* data = AccessorInfo::cast(structure);
+ Handle<AccessorInfo> data(AccessorInfo::cast(structure));
Object* fun_obj = data->getter();
v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
HandleScope scope(isolate);
@@ -7344,14 +7350,16 @@
if (structure->IsAccessorInfo()) {
// api style callbacks
- AccessorInfo* data = AccessorInfo::cast(structure);
+ Handle<JSObject> self(this);
+ Handle<JSObject> holder_handle(JSObject::cast(holder));
+ Handle<AccessorInfo> data(AccessorInfo::cast(structure));
Object* call_obj = data->setter();
v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
if (call_fun == NULL) return value;
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
Handle<String> key(isolate->factory()->NumberToString(number));
- LOG(isolate, ApiNamedPropertyAccess("store", this, *key));
- CustomArguments args(isolate, data->data(), this, JSObject::cast(holder));
+ LOG(isolate, ApiNamedPropertyAccess("store", *self, *key));
+ CustomArguments args(isolate, data->data(), *self, *holder_handle);
v8::AccessorInfo info(args.end());
{
// Leaving JavaScript.
@@ -7553,8 +7561,8 @@
// If put fails in strict mode, throw an exception.
if (!dictionary->ValueAtPut(entry, value) &&
strict_mode == kStrictMode) {
- Handle<Object> number(isolate->factory()->NewNumberFromUint(index));
Handle<Object> holder(this);
+ Handle<Object> number(isolate->factory()->NewNumberFromUint(index));
Handle<Object> args[2] = { number, holder };
return isolate->Throw(
*isolate->factory()->NewTypeError("strict_read_only_property",
diff --git a/src/objects.h b/src/objects.h
index 763c92b..cb4a420 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -90,6 +90,7 @@
// - Code
// - Map
// - Oddball
+// - JSProxy
// - Proxy
// - SharedFunctionInfo
// - Struct
@@ -287,6 +288,7 @@
V(JS_GLOBAL_PROPERTY_CELL_TYPE) \
\
V(HEAP_NUMBER_TYPE) \
+ V(JS_PROXY_TYPE) \
V(PROXY_TYPE) \
V(BYTE_ARRAY_TYPE) \
/* Note: the order of these external array */ \
@@ -515,6 +517,7 @@
// objects.
HEAP_NUMBER_TYPE,
PROXY_TYPE,
+ JS_PROXY_TYPE,
BYTE_ARRAY_TYPE,
EXTERNAL_BYTE_ARRAY_TYPE, // FIRST_EXTERNAL_ARRAY_TYPE
EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
@@ -720,6 +723,7 @@
V(Proxy) \
V(Boolean) \
V(JSArray) \
+ V(JSProxy) \
V(JSRegExp) \
V(HashTable) \
V(Dictionary) \
@@ -3317,6 +3321,12 @@
// [deoptimization_data]: Array containing data for deopt.
DECL_ACCESSORS(deoptimization_data, FixedArray)
+ // [next_code_flushing_candidate]: Field only used during garbage
+ // collection to hold code flushing candidates. The contents of this
+ // field do not have to be traced during garbage collection since
+ // it is only used by the garbage collector itself.
+ DECL_ACCESSORS(next_code_flushing_candidate, Object)
+
// Unchecked accessors to be used during GC.
inline ByteArray* unchecked_relocation_info();
inline FixedArray* unchecked_deoptimization_data();
@@ -3537,9 +3547,12 @@
static const int kRelocationInfoOffset = kInstructionSizeOffset + kIntSize;
static const int kDeoptimizationDataOffset =
kRelocationInfoOffset + kPointerSize;
- static const int kFlagsOffset = kDeoptimizationDataOffset + kPointerSize;
- static const int kKindSpecificFlagsOffset = kFlagsOffset + kIntSize;
+ static const int kNextCodeFlushingCandidateOffset =
+ kDeoptimizationDataOffset + kPointerSize;
+ static const int kFlagsOffset =
+ kNextCodeFlushingCandidateOffset + kPointerSize;
+ static const int kKindSpecificFlagsOffset = kFlagsOffset + kIntSize;
static const int kKindSpecificFlagsSize = 2 * kIntSize;
static const int kHeaderPaddingStart = kKindSpecificFlagsOffset +
@@ -3947,7 +3960,7 @@
// An abstract superclass, a marker class really, for simple structure classes.
-// It doesn't carry much functionality but allows struct classes to me
+// It doesn't carry much functionality but allows struct classes to be
// identified in the type system.
class Struct: public HeapObject {
public:
@@ -6094,6 +6107,39 @@
};
+// The JSProxy describes ECMAScript Harmony proxies.
+class JSProxy: public HeapObject {
+ public:
+ // [handler]: The handler property.
+ DECL_ACCESSORS(handler, Object)
+
+ // Casting.
+ static inline JSProxy* cast(Object* obj);
+
+ // Dispatched behavior.
+#ifdef OBJECT_PRINT
+ inline void JSProxyPrint() {
+ JSProxyPrint(stdout);
+ }
+ void JSProxyPrint(FILE* out);
+#endif
+#ifdef DEBUG
+ void JSProxyVerify();
+#endif
+
+ // Layout description.
+ static const int kHandlerOffset = HeapObject::kHeaderSize;
+ static const int kSize = kHandlerOffset + kPointerSize;
+
+ typedef FixedBodyDescriptor<kHandlerOffset,
+ kHandlerOffset + kPointerSize,
+ kSize> BodyDescriptor;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxy);
+};
+
+
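
Editor's note: the JSProxy layout above chains each offset off the previous one: the handler pointer sits immediately after the heap-object header, and kSize follows it, so the body descriptor covers exactly one tagged pointer. A small sketch of the offset arithmetic, assuming 32-bit sizes purely for illustration:

    #include <cstdio>

    int main() {
      // Assumed 32-bit values; the actual sizes are target-defined.
      const int kPointerSize   = 4;
      const int kHeaderSize    = 4;                        // map pointer
      const int kHandlerOffset = kHeaderSize;              // first field
      const int kSize          = kHandlerOffset + kPointerSize;
      std::printf("handler at %d, object size %d\n", kHandlerOffset, kSize);
      return 0;
    }
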
// Proxy describes objects pointing from JavaScript to C structures.
// Since they cannot contain references to JS HeapObjects they can be
diff --git a/src/parser.cc b/src/parser.cc
index 9e4031a..d5353c7 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -1309,7 +1309,7 @@
var = top_scope_->LocalLookup(name);
if (var == NULL) {
// Declare the name.
- var = top_scope_->DeclareLocal(name, mode);
+ var = top_scope_->DeclareLocal(name, mode, Scope::VAR_OR_CONST);
} else {
// The name was declared before; check for conflicting
// re-declarations. If the previous declaration was a const or the
@@ -1581,6 +1581,12 @@
is_const /* always bound for CONST! */,
CHECK_OK);
nvars++;
+ if (top_scope_->num_var_or_const() > kMaxNumFunctionLocals) {
+ ReportMessageAt(scanner().location(), "too_many_variables",
+ Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
// Parse initialization expression if present and/or needed. A
// declaration of the form:
@@ -3564,7 +3570,9 @@
reserved_loc = scanner().location();
}
- Variable* parameter = top_scope_->DeclareLocal(param_name, Variable::VAR);
+ Variable* parameter = top_scope_->DeclareLocal(param_name,
+ Variable::VAR,
+ Scope::PARAMETER);
top_scope_->AddParameter(parameter);
num_parameters++;
if (num_parameters > kMaxNumFunctionParameters) {
@@ -4084,6 +4092,21 @@
}
+Handle<String> JsonParser::GetSymbol() {
+ int literal_length = scanner_.literal_length();
+ if (literal_length == 0) {
+ return isolate()->factory()->empty_string();
+ }
+ if (scanner_.is_literal_ascii()) {
+ return isolate()->factory()->LookupAsciiSymbol(
+ scanner_.literal_ascii_string());
+ } else {
+ return isolate()->factory()->LookupTwoByteSymbol(
+ scanner_.literal_uc16_string());
+ }
+}
+
+
// Parse any JSON value.
Handle<Object> JsonParser::ParseJsonValue() {
Token::Value token = scanner_.Next();
@@ -4125,7 +4148,7 @@
if (scanner_.Next() != Token::STRING) {
return ReportUnexpectedToken();
}
- Handle<String> key = GetString();
+ Handle<String> key = GetSymbol();
if (scanner_.Next() != Token::COLON) {
return ReportUnexpectedToken();
}
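
Editor's note: GetSymbol looks parsed JSON keys up in the symbol table, so a key that recurs across many objects shares one interned string rather than allocating a fresh copy per occurrence. A hedged sketch of interning with a plain hash set (not V8's symbol table):

    #include <cassert>
    #include <string>
    #include <unordered_set>

    // Toy intern table: repeated lookups of equal keys return the same object.
    static const std::string* Intern(std::unordered_set<std::string>& table,
                                     const std::string& key) {
      return &*table.insert(key).first;
    }

    int main() {
      std::unordered_set<std::string> table;
      const std::string* a = Intern(table, "name");
      const std::string* b = Intern(table, "name");
      assert(a == b);  // one shared string, as with LookupAsciiSymbol
      return 0;
    }
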
diff --git a/src/parser.h b/src/parser.h
index f50e2e3..01cf611 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -449,6 +449,7 @@
// construct a hashable id, so if more than 2^17 are allowed, this
// should be checked.
static const int kMaxNumFunctionParameters = 32766;
+ static const int kMaxNumFunctionLocals = 32767;
FunctionLiteral* ParseLazy(CompilationInfo* info,
UC16CharacterStream* source,
ZoneScope* zone_scope);
@@ -814,6 +815,8 @@
Handle<Object> ReportUnexpectedToken() { return Handle<Object>::null(); }
// Converts the currently parsed literal to a JavaScript String.
Handle<String> GetString();
+ // Converts the currently parsed literal to a JavaScript symbol (interned string).
+ Handle<String> GetSymbol();
Isolate* isolate_;
JsonScanner scanner_;
diff --git a/src/proxy.js b/src/proxy.js
index 2516983..01d48b4 100644
--- a/src/proxy.js
+++ b/src/proxy.js
@@ -26,3 +26,40 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
global.Proxy = new $Object();
+
+var $Proxy = global.Proxy
+
+var fundamentalTraps = [
+ "getOwnPropertyDescriptor",
+ "getPropertyDescriptor",
+ "getOwnPropertyNames",
+ "getPropertyNames",
+ "defineProperty",
+ "delete",
+ "fix",
+]
+
+var derivedTraps = [
+ "has",
+ "hasOwn",
+ "get",
+ "set",
+ "enumerate",
+ "keys",
+]
+
+var functionTraps = [
+ "callTrap",
+ "constructTrap",
+]
+
+$Proxy.createFunction = function(handler, callTrap, constructTrap) {
+ handler.callTrap = callTrap
+ handler.constructTrap = constructTrap
+ return $Proxy.create(handler)
+}
+
+$Proxy.create = function(handler, proto) {
+ if (!IS_SPEC_OBJECT(proto)) proto = $Object.prototype
+ return %CreateJSProxy(handler, proto)
+}
diff --git a/src/runtime.cc b/src/runtime.cc
index 88f1bc6..7b90469 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -588,6 +588,17 @@
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSProxy) {
+ ASSERT(args.length() == 2);
+ Object* handler = args[0];
+ Object* prototype = args[1];
+ Object* used_prototype =
+ (prototype->IsJSObject() || prototype->IsJSProxy()) ? prototype
+ : isolate->heap()->null_value();
+ return isolate->heap()->AllocateJSProxy(handler, used_prototype);
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateCatchExtensionObject) {
ASSERT(args.length() == 2);
CONVERT_CHECKED(String, key, args[0]);
@@ -4183,25 +4194,33 @@
ASSERT(args.length() == 2);
CONVERT_CHECKED(String, key, args[1]);
+ uint32_t index;
+ const bool key_is_array_index = key->AsArrayIndex(&index);
+
Object* obj = args[0];
// Only JS objects can have properties.
if (obj->IsJSObject()) {
JSObject* object = JSObject::cast(obj);
- // Fast case - no interceptors.
+ // Fast case: either the key is a real named property or it is not
+ // an array index and there are no interceptors or hidden
+ // prototypes.
if (object->HasRealNamedProperty(key)) return isolate->heap()->true_value();
- // Slow case. Either it's not there or we have an interceptor. We should
- // have handles for this kind of deal.
+ Map* map = object->map();
+ if (!key_is_array_index &&
+ !map->has_named_interceptor() &&
+ !HeapObject::cast(map->prototype())->map()->is_hidden_prototype()) {
+ return isolate->heap()->false_value();
+ }
+ // Slow case.
HandleScope scope(isolate);
return HasLocalPropertyImplementation(isolate,
Handle<JSObject>(object),
Handle<String>(key));
- } else if (obj->IsString()) {
+ } else if (obj->IsString() && key_is_array_index) {
// Well, there is one exception: Handle [] on strings.
- uint32_t index;
- if (key->AsArrayIndex(&index)) {
- String* string = String::cast(obj);
- if (index < static_cast<uint32_t>(string->length()))
- return isolate->heap()->true_value();
+ String* string = String::cast(obj);
+ if (index < static_cast<uint32_t>(string->length())) {
+ return isolate->heap()->true_value();
}
}
return isolate->heap()->false_value();
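
Editor's note: the rewritten fast path can answer negatively without the slow, handle-allocating lookup: if the key is not an array index, the map has no named interceptor, and the prototype is not a hidden prototype, the absence of a real named property is definitive. A sketch of that decision order, with illustrative stand-in predicates rather than V8's map queries:

    #include <cstdio>

    struct Obj {
      bool has_real_named_property;
      bool has_named_interceptor;
      bool hidden_prototype;
    };

    static bool HasLocalProperty(const Obj& o, bool key_is_array_index) {
      if (o.has_real_named_property) return true;  // fast positive answer
      if (!key_is_array_index &&
          !o.has_named_interceptor &&
          !o.hidden_prototype) {
        return false;                              // fast negative answer
      }
      // Otherwise fall back to the slow, handle-based lookup.
      return false;  // placeholder for the slow case
    }

    int main() {
      Obj plain{false, false, false};
      std::printf("%d\n", HasLocalProperty(plain, false));  // prints 0
      return 0;
    }
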
@@ -7909,12 +7928,13 @@
// If the "property" we were looking for is a local variable or an
// argument in a context, the receiver is the global object; see
// ECMA-262, 3rd., 10.1.6 and 10.2.3.
- JSObject* receiver =
- isolate->context()->global()->global_receiver();
+ // GetElement below can cause GC.
+ Handle<JSObject> receiver(
+ isolate->context()->global()->global_receiver());
MaybeObject* value = (holder->IsContext())
? Context::cast(*holder)->get(index)
: JSObject::cast(*holder)->GetElement(index);
- return MakePair(Unhole(isolate->heap(), value, attributes), receiver);
+ return MakePair(Unhole(isolate->heap(), value, attributes), *receiver);
}
// If the holder is found, we read the property from it.
@@ -7929,10 +7949,14 @@
} else {
receiver = ComputeReceiverForNonGlobal(isolate, object);
}
+
+ // GetProperty below can cause GC.
+ Handle<JSObject> receiver_handle(receiver);
+
// No need to unhole the value here. This is taken care of by the
// GetProperty function.
MaybeObject* value = object->GetProperty(*name);
- return MakePair(value, receiver);
+ return MakePair(value, *receiver_handle);
}
if (throw_error) {
diff --git a/src/runtime.h b/src/runtime.h
index a61e681..d3223d1 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -275,6 +275,9 @@
F(CreateArrayLiteral, 3, 1) \
F(CreateArrayLiteralShallow, 3, 1) \
\
+ /* Harmony proxies */ \
+ F(CreateJSProxy, 2, 1) \
+ \
/* Catch context extension objects */ \
F(CreateCatchExtensionObject, 2, 1) \
\
diff --git a/src/runtime.js b/src/runtime.js
index 4f53efe..77b97ae 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -644,6 +644,6 @@
// NOTE: Setting the prototype for Array must take place as early as
// possible due to code generation for array literals. When
// generating code for an array literal a boilerplate array is created
-// that is cloned when running the code. It is essiential that the
+// that is cloned when running the code. It is essential that the
// boilerplate gets the right prototype.
%FunctionSetPrototype($Array, new $Array(0));
diff --git a/src/scopes.cc b/src/scopes.cc
index 7eadab0..6102442 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -203,6 +203,7 @@
inner_scope_calls_eval_ = false;
outer_scope_is_eval_scope_ = false;
force_eager_compilation_ = false;
+ num_var_or_const_ = 0;
num_stack_slots_ = 0;
num_heap_slots_ = 0;
scope_info_ = scope_info;
@@ -365,12 +366,17 @@
}
-Variable* Scope::DeclareLocal(Handle<String> name, Variable::Mode mode) {
+Variable* Scope::DeclareLocal(Handle<String> name,
+ Variable::Mode mode,
+ LocalType type) {
// DYNAMIC variables are introduced during variable allocation,
// INTERNAL variables are allocated explicitly, and TEMPORARY
// variables are allocated via NewTemporary().
ASSERT(!resolved());
ASSERT(mode == Variable::VAR || mode == Variable::CONST);
+ if (type == VAR_OR_CONST) {
+ num_var_or_const_++;
+ }
return variables_.Declare(this, name, mode, true, Variable::NORMAL);
}
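
Editor's note: DeclareLocal now bumps a per-scope counter only for VAR_OR_CONST declarations, and the parser rejects a function with "too_many_variables" once the count exceeds kMaxNumFunctionLocals (32767), keeping variable ids inside the range the hashable-id scheme can encode. A counting sketch under those assumptions:

    #include <cstdio>

    enum LocalType { PARAMETER, VAR_OR_CONST };
    const int kMaxNumFunctionLocals = 32767;

    struct Scope {
      int num_var_or_const = 0;
      bool Declare(LocalType type) {
        if (type == VAR_OR_CONST) ++num_var_or_const;
        return num_var_or_const <= kMaxNumFunctionLocals;  // parse error past this
      }
    };

    int main() {
      Scope s;
      s.Declare(PARAMETER);     // parameters don't count toward the limit
      s.Declare(VAR_OR_CONST);  // counted
      std::printf("%d\n", s.num_var_or_const);  // prints 1
      return 0;
    }
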
diff --git a/src/scopes.h b/src/scopes.h
index 44688ae..faa6fd9 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -95,6 +95,11 @@
GLOBAL_SCOPE // the top-level scope for a program or a top-level eval
};
+ enum LocalType {
+ PARAMETER,
+ VAR_OR_CONST
+ };
+
Scope(Scope* outer_scope, Type type);
virtual ~Scope() { }
@@ -134,7 +139,9 @@
// Declare a local variable in this scope. If the variable has been
// declared before, the previously declared variable is returned.
- virtual Variable* DeclareLocal(Handle<String> name, Variable::Mode mode);
+ virtual Variable* DeclareLocal(Handle<String> name,
+ Variable::Mode mode,
+ LocalType type);
// Declare an implicit global variable in this scope which must be a
// global scope. The variable was introduced (possibly from an inner
@@ -288,6 +295,9 @@
// cases the context parameter is an empty handle.
void AllocateVariables(Handle<Context> context);
+ // Current number of var or const locals.
+ int num_var_or_const() { return num_var_or_const_; }
+
// Result of variable allocation.
int num_stack_slots() const { return num_stack_slots_; }
int num_heap_slots() const { return num_heap_slots_; }
@@ -380,6 +390,9 @@
bool outer_scope_is_eval_scope_;
bool force_eager_compilation_;
+ // Computed as variables are declared.
+ int num_var_or_const_;
+
// Computed via AllocateVariables; function scopes only.
int num_stack_slots_;
int num_heap_slots_;
diff --git a/src/top.cc b/src/top.cc
deleted file mode 100644
index 6d68231..0000000
--- a/src/top.cc
+++ /dev/null
@@ -1,977 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "bootstrapper.h"
-#include "compiler.h"
-#include "debug.h"
-#include "execution.h"
-#include "messages.h"
-#include "platform.h"
-#include "simulator.h"
-#include "string-stream.h"
-#include "vm-state-inl.h"
-
-
-// TODO(isolates): move to isolate.cc. This stuff is kept here to
-// simplify merging.
-
-namespace v8 {
-namespace internal {
-
-ThreadLocalTop::ThreadLocalTop() {
- InitializeInternal();
-}
-
-
-void ThreadLocalTop::InitializeInternal() {
- c_entry_fp_ = 0;
- handler_ = 0;
-#ifdef USE_SIMULATOR
- simulator_ = NULL;
-#endif
-#ifdef ENABLE_LOGGING_AND_PROFILING
- js_entry_sp_ = NULL;
- external_callback_ = NULL;
-#endif
-#ifdef ENABLE_VMSTATE_TRACKING
- current_vm_state_ = EXTERNAL;
-#endif
- try_catch_handler_address_ = NULL;
- context_ = NULL;
- thread_id_ = ThreadId::Invalid();
- external_caught_exception_ = false;
- failed_access_check_callback_ = NULL;
- save_context_ = NULL;
- catcher_ = NULL;
-}
-
-
-void ThreadLocalTop::Initialize() {
- InitializeInternal();
-#ifdef USE_SIMULATOR
-#ifdef V8_TARGET_ARCH_ARM
- simulator_ = Simulator::current(isolate_);
-#elif V8_TARGET_ARCH_MIPS
- simulator_ = Simulator::current(isolate_);
-#endif
-#endif
- thread_id_ = ThreadId::Current();
-}
-
-
-v8::TryCatch* ThreadLocalTop::TryCatchHandler() {
- return TRY_CATCH_FROM_ADDRESS(try_catch_handler_address());
-}
-
-
-Address Isolate::get_address_from_id(Isolate::AddressId id) {
- return isolate_addresses_[id];
-}
-
-
-char* Isolate::Iterate(ObjectVisitor* v, char* thread_storage) {
- ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(thread_storage);
- Iterate(v, thread);
- return thread_storage + sizeof(ThreadLocalTop);
-}
-
-
-void Isolate::IterateThread(ThreadVisitor* v) {
- v->VisitThread(this, thread_local_top());
-}
-
-
-void Isolate::IterateThread(ThreadVisitor* v, char* t) {
- ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(t);
- v->VisitThread(this, thread);
-}
-
-
-void Isolate::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
- // Visit the roots from the top for a given thread.
- Object* pending;
- // The pending exception can sometimes be a failure. We can't show
- // that to the GC, which only understands objects.
- if (thread->pending_exception_->ToObject(&pending)) {
- v->VisitPointer(&pending);
- thread->pending_exception_ = pending; // In case GC updated it.
- }
- v->VisitPointer(&(thread->pending_message_obj_));
- v->VisitPointer(BitCast<Object**>(&(thread->pending_message_script_)));
- v->VisitPointer(BitCast<Object**>(&(thread->context_)));
- Object* scheduled;
- if (thread->scheduled_exception_->ToObject(&scheduled)) {
- v->VisitPointer(&scheduled);
- thread->scheduled_exception_ = scheduled;
- }
-
- for (v8::TryCatch* block = thread->TryCatchHandler();
- block != NULL;
- block = TRY_CATCH_FROM_ADDRESS(block->next_)) {
- v->VisitPointer(BitCast<Object**>(&(block->exception_)));
- v->VisitPointer(BitCast<Object**>(&(block->message_)));
- }
-
- // Iterate over pointers on native execution stack.
- for (StackFrameIterator it(this, thread); !it.done(); it.Advance()) {
- it.frame()->Iterate(v);
- }
-}
-
-
-void Isolate::Iterate(ObjectVisitor* v) {
- ThreadLocalTop* current_t = thread_local_top();
- Iterate(v, current_t);
-}
-
-
-void Isolate::RegisterTryCatchHandler(v8::TryCatch* that) {
- // The ARM simulator has a separate JS stack. We therefore register
- // the C++ try catch handler with the simulator and get back an
- // address that can be used for comparisons with addresses into the
- // JS stack. When running without the simulator, the address
- // returned will be the address of the C++ try catch handler itself.
- Address address = reinterpret_cast<Address>(
- SimulatorStack::RegisterCTryCatch(reinterpret_cast<uintptr_t>(that)));
- thread_local_top()->set_try_catch_handler_address(address);
-}
-
-
-void Isolate::UnregisterTryCatchHandler(v8::TryCatch* that) {
- ASSERT(thread_local_top()->TryCatchHandler() == that);
- thread_local_top()->set_try_catch_handler_address(
- reinterpret_cast<Address>(that->next_));
- thread_local_top()->catcher_ = NULL;
- SimulatorStack::UnregisterCTryCatch();
-}
-
-
-Handle<String> Isolate::StackTraceString() {
- if (stack_trace_nesting_level_ == 0) {
- stack_trace_nesting_level_++;
- HeapStringAllocator allocator;
- StringStream::ClearMentionedObjectCache();
- StringStream accumulator(&allocator);
- incomplete_message_ = &accumulator;
- PrintStack(&accumulator);
- Handle<String> stack_trace = accumulator.ToString();
- incomplete_message_ = NULL;
- stack_trace_nesting_level_ = 0;
- return stack_trace;
- } else if (stack_trace_nesting_level_ == 1) {
- stack_trace_nesting_level_++;
- OS::PrintError(
- "\n\nAttempt to print stack while printing stack (double fault)\n");
- OS::PrintError(
- "If you are lucky you may find a partial stack dump on stdout.\n\n");
- incomplete_message_->OutputToStdOut();
- return factory()->empty_symbol();
- } else {
- OS::Abort();
- // Unreachable
- return factory()->empty_symbol();
- }
-}
-
-
-Handle<JSArray> Isolate::CaptureCurrentStackTrace(
- int frame_limit, StackTrace::StackTraceOptions options) {
- // Ensure no negative values.
- int limit = Max(frame_limit, 0);
- Handle<JSArray> stack_trace = factory()->NewJSArray(frame_limit);
-
- Handle<String> column_key = factory()->LookupAsciiSymbol("column");
- Handle<String> line_key = factory()->LookupAsciiSymbol("lineNumber");
- Handle<String> script_key = factory()->LookupAsciiSymbol("scriptName");
- Handle<String> name_or_source_url_key =
- factory()->LookupAsciiSymbol("nameOrSourceURL");
- Handle<String> script_name_or_source_url_key =
- factory()->LookupAsciiSymbol("scriptNameOrSourceURL");
- Handle<String> function_key = factory()->LookupAsciiSymbol("functionName");
- Handle<String> eval_key = factory()->LookupAsciiSymbol("isEval");
- Handle<String> constructor_key =
- factory()->LookupAsciiSymbol("isConstructor");
-
- StackTraceFrameIterator it(this);
- int frames_seen = 0;
- while (!it.done() && (frames_seen < limit)) {
- JavaScriptFrame* frame = it.frame();
- // Set initial size to the maximum inlining level + 1 for the outermost
- // function.
- List<FrameSummary> frames(Compiler::kMaxInliningLevels + 1);
- frame->Summarize(&frames);
- for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
- // Create a JSObject to hold the information for the StackFrame.
- Handle<JSObject> stackFrame = factory()->NewJSObject(object_function());
-
- Handle<JSFunction> fun = frames[i].function();
- Handle<Script> script(Script::cast(fun->shared()->script()));
-
- if (options & StackTrace::kLineNumber) {
- int script_line_offset = script->line_offset()->value();
- int position = frames[i].code()->SourcePosition(frames[i].pc());
- int line_number = GetScriptLineNumber(script, position);
- // line_number is already shifted by the script_line_offset.
- int relative_line_number = line_number - script_line_offset;
- if (options & StackTrace::kColumnOffset && relative_line_number >= 0) {
- Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
- int start = (relative_line_number == 0) ? 0 :
- Smi::cast(line_ends->get(relative_line_number - 1))->value() + 1;
- int column_offset = position - start;
- if (relative_line_number == 0) {
- // For the case where the code is on the same line as the script
- // tag.
- column_offset += script->column_offset()->value();
- }
- SetLocalPropertyNoThrow(stackFrame, column_key,
- Handle<Smi>(Smi::FromInt(column_offset + 1)));
- }
- SetLocalPropertyNoThrow(stackFrame, line_key,
- Handle<Smi>(Smi::FromInt(line_number + 1)));
- }
-
- if (options & StackTrace::kScriptName) {
- Handle<Object> script_name(script->name(), this);
- SetLocalPropertyNoThrow(stackFrame, script_key, script_name);
- }
-
- if (options & StackTrace::kScriptNameOrSourceURL) {
- Handle<Object> script_name(script->name(), this);
- Handle<JSValue> script_wrapper = GetScriptWrapper(script);
- Handle<Object> property = GetProperty(script_wrapper,
- name_or_source_url_key);
- ASSERT(property->IsJSFunction());
- Handle<JSFunction> method = Handle<JSFunction>::cast(property);
- bool caught_exception;
- Handle<Object> result = Execution::TryCall(method, script_wrapper, 0,
- NULL, &caught_exception);
- if (caught_exception) {
- result = factory()->undefined_value();
- }
- SetLocalPropertyNoThrow(stackFrame, script_name_or_source_url_key,
- result);
- }
-
- if (options & StackTrace::kFunctionName) {
- Handle<Object> fun_name(fun->shared()->name(), this);
- if (fun_name->ToBoolean()->IsFalse()) {
- fun_name = Handle<Object>(fun->shared()->inferred_name(), this);
- }
- SetLocalPropertyNoThrow(stackFrame, function_key, fun_name);
- }
-
- if (options & StackTrace::kIsEval) {
- int type = Smi::cast(script->compilation_type())->value();
- Handle<Object> is_eval = (type == Script::COMPILATION_TYPE_EVAL) ?
- factory()->true_value() : factory()->false_value();
- SetLocalPropertyNoThrow(stackFrame, eval_key, is_eval);
- }
-
- if (options & StackTrace::kIsConstructor) {
- Handle<Object> is_constructor = (frames[i].is_constructor()) ?
- factory()->true_value() : factory()->false_value();
- SetLocalPropertyNoThrow(stackFrame, constructor_key, is_constructor);
- }
-
- FixedArray::cast(stack_trace->elements())->set(frames_seen, *stackFrame);
- frames_seen++;
- }
- it.Advance();
- }
-
- stack_trace->set_length(Smi::FromInt(frames_seen));
- return stack_trace;
-}
-
-
-void Isolate::PrintStack() {
- if (stack_trace_nesting_level_ == 0) {
- stack_trace_nesting_level_++;
-
- StringAllocator* allocator;
- if (preallocated_message_space_ == NULL) {
- allocator = new HeapStringAllocator();
- } else {
- allocator = preallocated_message_space_;
- }
-
- StringStream::ClearMentionedObjectCache();
- StringStream accumulator(allocator);
- incomplete_message_ = &accumulator;
- PrintStack(&accumulator);
- accumulator.OutputToStdOut();
- accumulator.Log();
- incomplete_message_ = NULL;
- stack_trace_nesting_level_ = 0;
- if (preallocated_message_space_ == NULL) {
- // Remove the HeapStringAllocator created above.
- delete allocator;
- }
- } else if (stack_trace_nesting_level_ == 1) {
- stack_trace_nesting_level_++;
- OS::PrintError(
- "\n\nAttempt to print stack while printing stack (double fault)\n");
- OS::PrintError(
- "If you are lucky you may find a partial stack dump on stdout.\n\n");
- incomplete_message_->OutputToStdOut();
- }
-}
-
-
-static void PrintFrames(StringStream* accumulator,
- StackFrame::PrintMode mode) {
- StackFrameIterator it;
- for (int i = 0; !it.done(); it.Advance()) {
- it.frame()->Print(accumulator, mode, i++);
- }
-}
-
-
-void Isolate::PrintStack(StringStream* accumulator) {
- if (!IsInitialized()) {
- accumulator->Add(
- "\n==== Stack trace is not available ==========================\n\n");
- accumulator->Add(
- "\n==== Isolate for the thread is not initialized =============\n\n");
- return;
- }
- // The MentionedObjectCache is not GC-proof at the moment.
- AssertNoAllocation nogc;
- ASSERT(StringStream::IsMentionedObjectCacheClear());
-
- // Avoid printing anything if there are no frames.
- if (c_entry_fp(thread_local_top()) == 0) return;
-
- accumulator->Add(
- "\n==== Stack trace ============================================\n\n");
- PrintFrames(accumulator, StackFrame::OVERVIEW);
-
- accumulator->Add(
- "\n==== Details ================================================\n\n");
- PrintFrames(accumulator, StackFrame::DETAILS);
-
- accumulator->PrintMentionedObjectCache();
- accumulator->Add("=====================\n\n");
-}
-
-
-void Isolate::SetFailedAccessCheckCallback(
- v8::FailedAccessCheckCallback callback) {
- thread_local_top()->failed_access_check_callback_ = callback;
-}
-
-
-void Isolate::ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type) {
- if (!thread_local_top()->failed_access_check_callback_) return;
-
- ASSERT(receiver->IsAccessCheckNeeded());
- ASSERT(context());
-
- // Get the data object from access check info.
- JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
- if (!constructor->shared()->IsApiFunction()) return;
- Object* data_obj =
- constructor->shared()->get_api_func_data()->access_check_info();
- if (data_obj == heap_.undefined_value()) return;
-
- HandleScope scope;
- Handle<JSObject> receiver_handle(receiver);
- Handle<Object> data(AccessCheckInfo::cast(data_obj)->data());
- thread_local_top()->failed_access_check_callback_(
- v8::Utils::ToLocal(receiver_handle),
- type,
- v8::Utils::ToLocal(data));
-}
-
-
-enum MayAccessDecision {
- YES, NO, UNKNOWN
-};
-
-
-static MayAccessDecision MayAccessPreCheck(Isolate* isolate,
- JSObject* receiver,
- v8::AccessType type) {
- // During bootstrapping, callback functions are not enabled yet.
- if (isolate->bootstrapper()->IsActive()) return YES;
-
- if (receiver->IsJSGlobalProxy()) {
- Object* receiver_context = JSGlobalProxy::cast(receiver)->context();
- if (!receiver_context->IsContext()) return NO;
-
- // Get the global context of current top context.
- // avoid using Isolate::global_context() because it uses Handle.
- Context* global_context = isolate->context()->global()->global_context();
- if (receiver_context == global_context) return YES;
-
- if (Context::cast(receiver_context)->security_token() ==
- global_context->security_token())
- return YES;
- }
-
- return UNKNOWN;
-}
-
-
-bool Isolate::MayNamedAccess(JSObject* receiver, Object* key,
- v8::AccessType type) {
- ASSERT(receiver->IsAccessCheckNeeded());
-
- // The callers of this method are not expecting a GC.
- AssertNoAllocation no_gc;
-
- // Skip checks for hidden properties access. Note, we do not
- // require existence of a context in this case.
- if (key == heap_.hidden_symbol()) return true;
-
- // Check for compatibility between the security tokens in the
- // current lexical context and the accessed object.
- ASSERT(context());
-
- MayAccessDecision decision = MayAccessPreCheck(this, receiver, type);
- if (decision != UNKNOWN) return decision == YES;
-
- // Get named access check callback
- JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
- if (!constructor->shared()->IsApiFunction()) return false;
-
- Object* data_obj =
- constructor->shared()->get_api_func_data()->access_check_info();
- if (data_obj == heap_.undefined_value()) return false;
-
- Object* fun_obj = AccessCheckInfo::cast(data_obj)->named_callback();
- v8::NamedSecurityCallback callback =
- v8::ToCData<v8::NamedSecurityCallback>(fun_obj);
-
- if (!callback) return false;
-
- HandleScope scope(this);
- Handle<JSObject> receiver_handle(receiver, this);
- Handle<Object> key_handle(key, this);
- Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
- LOG(this, ApiNamedSecurityCheck(key));
- bool result = false;
- {
- // Leaving JavaScript.
- VMState state(this, EXTERNAL);
- result = callback(v8::Utils::ToLocal(receiver_handle),
- v8::Utils::ToLocal(key_handle),
- type,
- v8::Utils::ToLocal(data));
- }
- return result;
-}
-
-
-bool Isolate::MayIndexedAccess(JSObject* receiver,
- uint32_t index,
- v8::AccessType type) {
- ASSERT(receiver->IsAccessCheckNeeded());
- // Check for compatibility between the security tokens in the
- // current lexical context and the accessed object.
- ASSERT(context());
-
- MayAccessDecision decision = MayAccessPreCheck(this, receiver, type);
- if (decision != UNKNOWN) return decision == YES;
-
- // Get indexed access check callback
- JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
- if (!constructor->shared()->IsApiFunction()) return false;
-
- Object* data_obj =
- constructor->shared()->get_api_func_data()->access_check_info();
- if (data_obj == heap_.undefined_value()) return false;
-
- Object* fun_obj = AccessCheckInfo::cast(data_obj)->indexed_callback();
- v8::IndexedSecurityCallback callback =
- v8::ToCData<v8::IndexedSecurityCallback>(fun_obj);
-
- if (!callback) return false;
-
- HandleScope scope(this);
- Handle<JSObject> receiver_handle(receiver, this);
- Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
- LOG(this, ApiIndexedSecurityCheck(index));
- bool result = false;
- {
- // Leaving JavaScript.
- VMState state(this, EXTERNAL);
- result = callback(v8::Utils::ToLocal(receiver_handle),
- index,
- type,
- v8::Utils::ToLocal(data));
- }
- return result;
-}
-
-
-const char* const Isolate::kStackOverflowMessage =
- "Uncaught RangeError: Maximum call stack size exceeded";
-
-
-Failure* Isolate::StackOverflow() {
- HandleScope scope;
- Handle<String> key = factory()->stack_overflow_symbol();
- Handle<JSObject> boilerplate =
- Handle<JSObject>::cast(GetProperty(js_builtins_object(), key));
- Handle<Object> exception = Copy(boilerplate);
- // TODO(1240995): To avoid having to call JavaScript code to compute
- // the message for stack overflow exceptions which is very likely to
- // double fault with another stack overflow exception, we use a
- // precomputed message.
- DoThrow(*exception, NULL);
- return Failure::Exception();
-}
-
-
-Failure* Isolate::TerminateExecution() {
- DoThrow(heap_.termination_exception(), NULL);
- return Failure::Exception();
-}
-
-
-Failure* Isolate::Throw(Object* exception, MessageLocation* location) {
- DoThrow(exception, location);
- return Failure::Exception();
-}
-
-
-Failure* Isolate::ReThrow(MaybeObject* exception, MessageLocation* location) {
- bool can_be_caught_externally = false;
- ShouldReportException(&can_be_caught_externally,
- is_catchable_by_javascript(exception));
- thread_local_top()->catcher_ = can_be_caught_externally ?
- try_catch_handler() : NULL;
-
- // Set the exception being re-thrown.
- set_pending_exception(exception);
- return Failure::Exception();
-}
-
-
-Failure* Isolate::ThrowIllegalOperation() {
- return Throw(heap_.illegal_access_symbol());
-}
-
-
-void Isolate::ScheduleThrow(Object* exception) {
- // When scheduling a throw we first throw the exception to get the
- // error reporting if it is uncaught before rescheduling it.
- Throw(exception);
- thread_local_top()->scheduled_exception_ = pending_exception();
- thread_local_top()->external_caught_exception_ = false;
- clear_pending_exception();
-}
-
-
-Failure* Isolate::PromoteScheduledException() {
- MaybeObject* thrown = scheduled_exception();
- clear_scheduled_exception();
- // Re-throw the exception to avoid getting repeated error reporting.
- return ReThrow(thrown);
-}
-
-
-void Isolate::PrintCurrentStackTrace(FILE* out) {
- StackTraceFrameIterator it(this);
- while (!it.done()) {
- HandleScope scope;
- // Find code position if recorded in relocation info.
- JavaScriptFrame* frame = it.frame();
- int pos = frame->LookupCode()->SourcePosition(frame->pc());
- Handle<Object> pos_obj(Smi::FromInt(pos));
- // Fetch function and receiver.
- Handle<JSFunction> fun(JSFunction::cast(frame->function()));
- Handle<Object> recv(frame->receiver());
- // Advance to the next JavaScript frame and determine if the
- // current frame is the top-level frame.
- it.Advance();
- Handle<Object> is_top_level = it.done()
- ? factory()->true_value()
- : factory()->false_value();
- // Generate and print stack trace line.
- Handle<String> line =
- Execution::GetStackTraceLine(recv, fun, pos_obj, is_top_level);
- if (line->length() > 0) {
- line->PrintOn(out);
- fprintf(out, "\n");
- }
- }
-}
-
-
-void Isolate::ComputeLocation(MessageLocation* target) {
- *target = MessageLocation(Handle<Script>(heap_.empty_script()), -1, -1);
- StackTraceFrameIterator it(this);
- if (!it.done()) {
- JavaScriptFrame* frame = it.frame();
- JSFunction* fun = JSFunction::cast(frame->function());
- Object* script = fun->shared()->script();
- if (script->IsScript() &&
- !(Script::cast(script)->source()->IsUndefined())) {
- int pos = frame->LookupCode()->SourcePosition(frame->pc());
- // Compute the location from the function and the reloc info.
- Handle<Script> casted_script(Script::cast(script));
- *target = MessageLocation(casted_script, pos, pos + 1);
- }
- }
-}
-
-
-bool Isolate::ShouldReportException(bool* can_be_caught_externally,
- bool catchable_by_javascript) {
- // Find the top-most try-catch handler.
- StackHandler* handler =
- StackHandler::FromAddress(Isolate::handler(thread_local_top()));
- while (handler != NULL && !handler->is_try_catch()) {
- handler = handler->next();
- }
-
- // Get the address of the external handler so we can compare the address to
- // determine which one is closer to the top of the stack.
- Address external_handler_address =
- thread_local_top()->try_catch_handler_address();
-
- // The exception has been externally caught if and only if there is
- // an external handler which is on top of the top-most try-catch
- // handler.
- *can_be_caught_externally = external_handler_address != NULL &&
- (handler == NULL || handler->address() > external_handler_address ||
- !catchable_by_javascript);
-
- if (*can_be_caught_externally) {
- // Only report the exception if the external handler is verbose.
- return try_catch_handler()->is_verbose_;
- } else {
- // Report the exception if it isn't caught by JavaScript code.
- return handler == NULL;
- }
-}
-
-
-void Isolate::DoThrow(MaybeObject* exception, MessageLocation* location) {
- ASSERT(!has_pending_exception());
-
- HandleScope scope;
- Object* exception_object = Smi::FromInt(0);
- bool is_object = exception->ToObject(&exception_object);
- Handle<Object> exception_handle(exception_object);
-
- // Determine reporting and whether the exception is caught externally.
- bool catchable_by_javascript = is_catchable_by_javascript(exception);
- // Only real objects can be caught by JS.
- ASSERT(!catchable_by_javascript || is_object);
- bool can_be_caught_externally = false;
- bool should_report_exception =
- ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
- bool report_exception = catchable_by_javascript && should_report_exception;
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Notify debugger of exception.
- if (catchable_by_javascript) {
- debugger_->OnException(exception_handle, report_exception);
- }
-#endif
-
- // Generate the message.
- Handle<Object> message_obj;
- MessageLocation potential_computed_location;
- bool try_catch_needs_message =
- can_be_caught_externally &&
- try_catch_handler()->capture_message_;
- if (report_exception || try_catch_needs_message) {
- if (location == NULL) {
- // If no location was specified we use a computed one instead
- ComputeLocation(&potential_computed_location);
- location = &potential_computed_location;
- }
- if (!bootstrapper()->IsActive()) {
- // It's not safe to try to make message objects or collect stack
- // traces while the bootstrapper is active since the infrastructure
- // may not have been properly initialized.
- Handle<String> stack_trace;
- if (FLAG_trace_exception) stack_trace = StackTraceString();
- Handle<JSArray> stack_trace_object;
- if (report_exception && capture_stack_trace_for_uncaught_exceptions_) {
- stack_trace_object = CaptureCurrentStackTrace(
- stack_trace_for_uncaught_exceptions_frame_limit_,
- stack_trace_for_uncaught_exceptions_options_);
- }
- ASSERT(is_object); // Can't use the handle unless there's a real object.
- message_obj = MessageHandler::MakeMessageObject("uncaught_exception",
- location, HandleVector<Object>(&exception_handle, 1), stack_trace,
- stack_trace_object);
- }
- }
-
- // Save the message for reporting if the exception remains uncaught.
- thread_local_top()->has_pending_message_ = report_exception;
- if (!message_obj.is_null()) {
- thread_local_top()->pending_message_obj_ = *message_obj;
- if (location != NULL) {
- thread_local_top()->pending_message_script_ = *location->script();
- thread_local_top()->pending_message_start_pos_ = location->start_pos();
- thread_local_top()->pending_message_end_pos_ = location->end_pos();
- }
- }
-
- // Do not forget to clean catcher_ if currently thrown exception cannot
- // be caught. If necessary, ReThrow will update the catcher.
- thread_local_top()->catcher_ = can_be_caught_externally ?
- try_catch_handler() : NULL;
-
- // NOTE: Notifying the debugger or generating the message
- // may have caused new exceptions. For now, we just ignore
- // that and set the pending exception to the original one.
- if (is_object) {
- set_pending_exception(*exception_handle);
- } else {
- // Failures are not on the heap so they neither need nor work with handles.
- ASSERT(exception_handle->IsFailure());
- set_pending_exception(exception);
- }
-}
-
-
-bool Isolate::IsExternallyCaught() {
- ASSERT(has_pending_exception());
-
- if ((thread_local_top()->catcher_ == NULL) ||
- (try_catch_handler() != thread_local_top()->catcher_)) {
- // When throwing the exception, we found no v8::TryCatch
- // which should care about this exception.
- return false;
- }
-
- if (!is_catchable_by_javascript(pending_exception())) {
- return true;
- }
-
- // Get the address of the external handler so we can compare the address to
- // determine which one is closer to the top of the stack.
- Address external_handler_address =
- thread_local_top()->try_catch_handler_address();
- ASSERT(external_handler_address != NULL);
-
- // The exception has been externally caught if and only if there is
- // an external handler which is on top of the top-most try-finally
- // handler.
- // There should be no try-catch blocks as they would prohibit us from
- // finding external catcher in the first place (see catcher_ check above).
- //
- // Note that a finally clause would rethrow an exception unless it's
- // aborted by jumps in control flow like return, break, etc., and we'll
- // have another chance to set a proper v8::TryCatch.
- StackHandler* handler =
- StackHandler::FromAddress(Isolate::handler(thread_local_top()));
- while (handler != NULL && handler->address() < external_handler_address) {
- ASSERT(!handler->is_try_catch());
- if (handler->is_try_finally()) return false;
-
- handler = handler->next();
- }
-
- return true;
-}
-
-
-void Isolate::ReportPendingMessages() {
- ASSERT(has_pending_exception());
- PropagatePendingExceptionToExternalTryCatch();
-
- // If the pending exception is OutOfMemoryException set out_of_memory in
- // the global context. Note: We have to mark the global context here
- // since the GenerateThrowOutOfMemory stub cannot make a RuntimeCall to
- // set it.
- HandleScope scope;
- if (thread_local_top_.pending_exception_ == Failure::OutOfMemoryException()) {
- context()->mark_out_of_memory();
- } else if (thread_local_top_.pending_exception_ ==
- heap()->termination_exception()) {
- // Do nothing: if needed, the exception has been already propagated to
- // v8::TryCatch.
- } else {
- if (thread_local_top_.has_pending_message_) {
- thread_local_top_.has_pending_message_ = false;
- if (!thread_local_top_.pending_message_obj_->IsTheHole()) {
- HandleScope scope;
- Handle<Object> message_obj(thread_local_top_.pending_message_obj_);
- if (thread_local_top_.pending_message_script_ != NULL) {
- Handle<Script> script(thread_local_top_.pending_message_script_);
- int start_pos = thread_local_top_.pending_message_start_pos_;
- int end_pos = thread_local_top_.pending_message_end_pos_;
- MessageLocation location(script, start_pos, end_pos);
- MessageHandler::ReportMessage(this, &location, message_obj);
- } else {
- MessageHandler::ReportMessage(this, NULL, message_obj);
- }
- }
- }
- }
- clear_pending_message();
-}
-
-
-void Isolate::TraceException(bool flag) {
- FLAG_trace_exception = flag; // TODO(isolates): This is an unfortunate use.
-}
-
-
-bool Isolate::OptionalRescheduleException(bool is_bottom_call) {
- ASSERT(has_pending_exception());
- PropagatePendingExceptionToExternalTryCatch();
-
- // Always reschedule out of memory exceptions.
- if (!is_out_of_memory()) {
- bool is_termination_exception =
- pending_exception() == heap_.termination_exception();
-
- // Do not reschedule the exception if this is the bottom call.
- bool clear_exception = is_bottom_call;
-
- if (is_termination_exception) {
- if (is_bottom_call) {
- thread_local_top()->external_caught_exception_ = false;
- clear_pending_exception();
- return false;
- }
- } else if (thread_local_top()->external_caught_exception_) {
- // If the exception is externally caught, clear it if there are no
- // JavaScript frames on the way to the C++ frame that has the
- // external handler.
- ASSERT(thread_local_top()->try_catch_handler_address() != NULL);
- Address external_handler_address =
- thread_local_top()->try_catch_handler_address();
- JavaScriptFrameIterator it;
- if (it.done() || (it.frame()->sp() > external_handler_address)) {
- clear_exception = true;
- }
- }
-
- // Clear the exception if needed.
- if (clear_exception) {
- thread_local_top()->external_caught_exception_ = false;
- clear_pending_exception();
- return false;
- }
- }
-
- // Reschedule the exception.
- thread_local_top()->scheduled_exception_ = pending_exception();
- clear_pending_exception();
- return true;
-}
-
-
-void Isolate::SetCaptureStackTraceForUncaughtExceptions(
- bool capture,
- int frame_limit,
- StackTrace::StackTraceOptions options) {
- capture_stack_trace_for_uncaught_exceptions_ = capture;
- stack_trace_for_uncaught_exceptions_frame_limit_ = frame_limit;
- stack_trace_for_uncaught_exceptions_options_ = options;
-}
-
-
-bool Isolate::is_out_of_memory() {
- if (has_pending_exception()) {
- MaybeObject* e = pending_exception();
- if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
- return true;
- }
- }
- if (has_scheduled_exception()) {
- MaybeObject* e = scheduled_exception();
- if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
- return true;
- }
- }
- return false;
-}
-
-
-Handle<Context> Isolate::global_context() {
- GlobalObject* global = thread_local_top()->context_->global();
- return Handle<Context>(global->global_context());
-}
-
-
-Handle<Context> Isolate::GetCallingGlobalContext() {
- JavaScriptFrameIterator it;
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (debug_->InDebugger()) {
- while (!it.done()) {
- JavaScriptFrame* frame = it.frame();
- Context* context = Context::cast(frame->context());
- if (context->global_context() == *debug_->debug_context()) {
- it.Advance();
- } else {
- break;
- }
- }
- }
-#endif // ENABLE_DEBUGGER_SUPPORT
- if (it.done()) return Handle<Context>::null();
- JavaScriptFrame* frame = it.frame();
- Context* context = Context::cast(frame->context());
- return Handle<Context>(context->global_context());
-}
-
-
-char* Isolate::ArchiveThread(char* to) {
- if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) {
- RuntimeProfiler::IsolateExitedJS(this);
- }
- memcpy(to, reinterpret_cast<char*>(thread_local_top()),
- sizeof(ThreadLocalTop));
- InitializeThreadLocal();
- return to + sizeof(ThreadLocalTop);
-}
-
-
-char* Isolate::RestoreThread(char* from) {
- memcpy(reinterpret_cast<char*>(thread_local_top()), from,
- sizeof(ThreadLocalTop));
-  // This might just be paranoia, but it seems to be needed in case a
- // thread_local_top_ is restored on a separate OS thread.
-#ifdef USE_SIMULATOR
-#ifdef V8_TARGET_ARCH_ARM
- thread_local_top()->simulator_ = Simulator::current(this);
-#elif V8_TARGET_ARCH_MIPS
- thread_local_top()->simulator_ = Simulator::current(this);
-#endif
-#endif
- if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) {
- RuntimeProfiler::IsolateEnteredJS(this);
- }
- return from + sizeof(ThreadLocalTop);
-}
-
-} } // namespace v8::internal
diff --git a/src/type-info.cc b/src/type-info.cc
index 29e3fbd..346f8cb 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -229,6 +229,9 @@
return TypeInfo::Smi();
case CompareIC::HEAP_NUMBERS:
return TypeInfo::Number();
+ case CompareIC::SYMBOLS:
+ case CompareIC::STRINGS:
+ return TypeInfo::String();
case CompareIC::OBJECTS:
// TODO(kasperl): We really need a type for JS objects here.
return TypeInfo::NonPrimitive();
@@ -239,6 +242,16 @@
}
+bool TypeFeedbackOracle::IsSymbolCompare(CompareOperation* expr) {
+ Handle<Object> object = GetInfo(expr->id());
+ if (!object->IsCode()) return false;
+ Handle<Code> code = Handle<Code>::cast(object);
+ if (!code->is_compare_ic_stub()) return false;
+ CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
+ return state == CompareIC::SYMBOLS;
+}
+
+
TypeInfo TypeFeedbackOracle::UnaryType(UnaryOperation* expr) {
Handle<Object> object = GetInfo(expr->id());
TypeInfo unknown = TypeInfo::Unknown();
@@ -428,6 +441,7 @@
Code::Kind kind = target->kind();
if (kind == Code::TYPE_RECORDING_BINARY_OP_IC ||
+ kind == Code::TYPE_RECORDING_UNARY_OP_IC ||
kind == Code::COMPARE_IC) {
SetInfo(id, target);
} else if (state == MONOMORPHIC) {
diff --git a/src/type-info.h b/src/type-info.h
index 5bfd4cd..5ca3c15 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -72,32 +72,6 @@
// We haven't started collecting info yet.
static TypeInfo Uninitialized() { return TypeInfo(kUninitialized); }
- // Return compact representation. Very sensitive to enum values below!
- // Compacting drops information about primitive types and strings types.
- // We use the compact representation when we only care about number types.
- int ThreeBitRepresentation() {
- ASSERT(type_ != kUninitialized);
- int answer = type_ & 0xf;
- answer = answer > 6 ? answer - 2 : answer;
- ASSERT(answer >= 0);
- ASSERT(answer <= 7);
- return answer;
- }
-
- // Decode compact representation. Very sensitive to enum values below!
- static TypeInfo ExpandedRepresentation(int three_bit_representation) {
- Type t = static_cast<Type>(three_bit_representation > 4 ?
- three_bit_representation + 2 :
- three_bit_representation);
- t = (t == kUnknown) ? t : static_cast<Type>(t | kPrimitive);
- ASSERT(t == kUnknown ||
- t == kNumber ||
- t == kInteger32 ||
- t == kSmi ||
- t == kDouble);
- return TypeInfo(t);
- }
-
int ToInt() {
return type_;
}
@@ -264,6 +238,7 @@
TypeInfo UnaryType(UnaryOperation* expr);
TypeInfo BinaryType(BinaryOperation* expr);
TypeInfo CompareType(CompareOperation* expr);
+ bool IsSymbolCompare(CompareOperation* expr);
TypeInfo SwitchType(CaseClause* clause);
TypeInfo IncrementType(CountOperation* expr);
diff --git a/src/uri.js b/src/uri.js
index e94b3fe..72ca6f1 100644
--- a/src/uri.js
+++ b/src/uri.js
@@ -166,7 +166,10 @@
// ECMA-262, section 15.1.3
function Encode(uri, unescape) {
var uriLength = uri.length;
- var result = new $Array(uriLength);
+  // We are going to pass the result to %StringFromCharCodeArray,
+  // which does not expect any getters/setters installed on the
+  // incoming array.
+ var result = new InternalArray(uriLength);
var index = 0;
for (var k = 0; k < uriLength; k++) {
var cc1 = uri.charCodeAt(k);
@@ -192,7 +195,10 @@
// ECMA-262, section 15.1.3
function Decode(uri, reserved) {
var uriLength = uri.length;
- var result = new $Array(uriLength);
+  // We are going to pass the result to %StringFromCharCodeArray,
+  // which does not expect any getters/setters installed on the
+  // incoming array.
+ var result = new InternalArray(uriLength);
var index = 0;
for (var k = 0; k < uriLength; k++) {
var ch = uri.charAt(k);
diff --git a/src/v8natives.js b/src/v8natives.js
index aa3645d..8db736c 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -235,7 +235,7 @@
// ECMA-262 - 15.2.4.5
function ObjectHasOwnProperty(V) {
- return %HasLocalProperty(ToObject(this), ToString(V));
+ return %HasLocalProperty(TO_OBJECT_INLINE(this), TO_STRING_INLINE(V));
}
diff --git a/src/version.cc b/src/version.cc
index d6d4e52..ac95e39 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 3
-#define BUILD_NUMBER 6
-#define PATCH_LEVEL 1
+#define BUILD_NUMBER 7
+#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 60657aa..745fdae 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -1215,7 +1215,7 @@
}
-void Assembler::j(Condition cc, Label* L, Hint hint, Label::Distance distance) {
+void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
if (cc == always) {
jmp(L);
return;
@@ -1224,7 +1224,6 @@
}
EnsureSpace ensure_space(this);
ASSERT(is_uint4(cc));
- if (FLAG_emit_branch_hints && hint != no_hint) emit(hint);
if (L->is_bound()) {
const int short_size = 2;
const int long_size = 6;
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index e3fd910..7769b03 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -327,22 +327,6 @@
}
-enum Hint {
- no_hint = 0,
- not_taken = 0x2e,
- taken = 0x3e
-};
-
-// The result of negating a hint is as if the corresponding condition
-// were negated by NegateCondition. That is, no_hint is mapped to
-// itself and not_taken and taken are mapped to each other.
-inline Hint NegateHint(Hint hint) {
- return (hint == no_hint)
- ? no_hint
- : ((hint == not_taken) ? taken : not_taken);
-}
-
-
// -----------------------------------------------------------------------------
// Machine instruction Immediates
@@ -1214,11 +1198,7 @@
// Conditional jumps
void j(Condition cc,
Label* L,
- Hint hint,
Label::Distance distance = Label::kFar);
- void j(Condition cc, Label* L, Label::Distance distance = Label::kFar) {
- j(cc, L, no_hint, distance);
- }
void j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode);
// Floating-point operations
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 12294be..af9054b 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -4859,9 +4859,9 @@
__ j(either_smi, &miss, Label::kNear);
__ CmpObjectType(rax, JS_OBJECT_TYPE, rcx);
- __ j(not_equal, &miss, not_taken, Label::kNear);
+ __ j(not_equal, &miss, Label::kNear);
__ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
- __ j(not_equal, &miss, not_taken, Label::kNear);
+ __ j(not_equal, &miss, Label::kNear);
ASSERT(GetCondition() == equal);
__ subq(rax, rdx);
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index fb61a6f..64a6a1d 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -1552,6 +1552,31 @@
}
+void LCodeGen::DoCmpSymbolEq(LCmpSymbolEq* instr) {
+ Register left = ToRegister(instr->InputAt(0));
+ Register right = ToRegister(instr->InputAt(1));
+ Register result = ToRegister(instr->result());
+
+ Label done;
+ __ cmpq(left, right);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ j(not_equal, &done, Label::kNear);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoCmpSymbolEqAndBranch(LCmpSymbolEqAndBranch* instr) {
+ Register left = ToRegister(instr->InputAt(0));
+ Register right = ToRegister(instr->InputAt(1));
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+ __ cmpq(left, right);
+ EmitBranch(true_block, false_block, equal);
+}
+
+
void LCodeGen::DoIsNull(LIsNull* instr) {
Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
@@ -1723,6 +1748,40 @@
}
+void LCodeGen::DoIsUndetectable(LIsUndetectable* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+ Label false_label, done;
+ __ JumpIfSmi(input, &false_label);
+ __ movq(result, FieldOperand(input, HeapObject::kMapOffset));
+ __ testb(FieldOperand(result, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(zero, &false_label);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+ __ jmp(&done);
+ __ bind(&false_label);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+ __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
+ __ testb(FieldOperand(temp, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ EmitBranch(true_block, false_block, not_zero);
+}
+
+
static InstanceType TestType(HHasInstanceType* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index a850ecf..f5f2879 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -239,6 +239,13 @@
}
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_undetectable(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_instance_type(");
InputAt(0)->PrintTo(stream);
@@ -1079,6 +1086,12 @@
ASSERT(compare->value()->representation().IsTagged());
return new LIsSmiAndBranch(Use(compare->value()));
+ } else if (v->IsIsUndetectable()) {
+ HIsUndetectable* compare = HIsUndetectable::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+
+ return new LIsUndetectableAndBranch(UseRegisterAtStart(compare->value()),
+ TempRegister());
} else if (v->IsHasInstanceType()) {
HHasInstanceType* compare = HHasInstanceType::cast(v);
ASSERT(compare->value()->representation().IsTagged());
@@ -1107,6 +1120,10 @@
HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
UseRegisterAtStart(compare->right()));
+ } else if (v->IsCompareSymbolEq()) {
+ HCompareSymbolEq* compare = HCompareSymbolEq::cast(v);
+ return new LCmpSymbolEqAndBranch(UseRegisterAtStart(compare->left()),
+ UseRegisterAtStart(compare->right()));
} else if (v->IsInstanceOf()) {
HInstanceOf* instance_of = HInstanceOf::cast(v);
LInstanceOfAndBranch* result =
@@ -1189,7 +1206,7 @@
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- return DefineAsRegister(new LContext);
+ return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
}
@@ -1514,6 +1531,15 @@
}
+LInstruction* LChunkBuilder::DoCompareSymbolEq(
+ HCompareSymbolEq* instr) {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LCmpSymbolEq* result = new LCmpSymbolEq(left, right);
+ return DefineAsRegister(result);
+}
+
+
LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -1538,6 +1564,14 @@
}
+LInstruction* LChunkBuilder::DoIsUndetectable(HIsUndetectable* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+
+ return DefineAsRegister(new LIsUndetectable(value));
+}
+
+
LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index c56bbfe..4aa235f 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -80,6 +80,8 @@
V(CmpJSObjectEq) \
V(CmpJSObjectEqAndBranch) \
V(CmpMapAndBranch) \
+ V(CmpSymbolEq) \
+ V(CmpSymbolEqAndBranch) \
V(CmpT) \
V(CmpTAndBranch) \
V(ConstantD) \
@@ -108,18 +110,23 @@
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
+ V(IsConstructCall) \
+ V(IsConstructCallAndBranch) \
V(IsNull) \
V(IsNullAndBranch) \
V(IsObject) \
V(IsObjectAndBranch) \
V(IsSmi) \
V(IsSmiAndBranch) \
+ V(IsUndetectable) \
+ V(IsUndetectableAndBranch) \
V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadElements) \
V(LoadExternalArrayPointer) \
+ V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
V(LoadKeyedFastElement) \
@@ -128,7 +135,6 @@
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
- V(LoadFunctionPrototype) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -160,13 +166,11 @@
V(StringLength) \
V(SubI) \
V(TaggedToI) \
- V(ToFastProperties) \
V(Throw) \
+ V(ToFastProperties) \
V(Typeof) \
V(TypeofIs) \
V(TypeofIsAndBranch) \
- V(IsConstructCall) \
- V(IsConstructCallAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
V(ValueOf)
@@ -664,6 +668,28 @@
};
+class LCmpSymbolEq: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCmpSymbolEq(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpSymbolEq, "cmp-symbol-eq")
+};
+
+
+class LCmpSymbolEqAndBranch: public LControlInstruction<2, 0> {
+ public:
+ LCmpSymbolEqAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpSymbolEqAndBranch, "cmp-symbol-eq-and-branch")
+};
+
+
class LIsNull: public LTemplateInstruction<1, 1, 0> {
public:
explicit LIsNull(LOperand* value) {
@@ -738,6 +764,31 @@
};
+class LIsUndetectable: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LIsUndetectable(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectable, "is-undetectable")
+ DECLARE_HYDROGEN_ACCESSOR(IsUndetectable)
+};
+
+
+class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+ public:
+ explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+ "is-undetectable-and-branch")
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasInstanceType(LOperand* value) {
diff --git a/test/cctest/SConscript b/test/cctest/SConscript
index 559f807..0197178 100644
--- a/test/cctest/SConscript
+++ b/test/cctest/SConscript
@@ -80,7 +80,6 @@
'test-strtod.cc',
'test-thread-termination.cc',
'test-threads.cc',
- 'test-type-info.cc',
'test-unbound-queue.cc',
'test-utils.cc',
'test-version.cc'
diff --git a/test/cctest/cctest.gyp b/test/cctest/cctest.gyp
index 2b9fbe9..f13f91e 100644
--- a/test/cctest/cctest.gyp
+++ b/test/cctest/cctest.gyp
@@ -113,7 +113,6 @@
'test-strtod.cc',
'test-thread-termination.cc',
'test-threads.cc',
- 'test-type-info.cc',
'test-unbound-queue.cc',
'test-utils.cc',
'test-version.cc'
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index 9e2872f..47ea302 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -52,9 +52,6 @@
##############################################################################
[ $arch == arm ]
-# BUG(1375): Test crashes on ARM.
-test-lockers/MultithreadedParallelIsolates: SKIP
-
# We cannot assume that we can throw OutOfMemory exceptions in all situations.
# Apparently our ARM box is in such a state. Skip the test as it also runs for
# a long time.
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 6552198..cf84538 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -3940,6 +3940,38 @@
}
+TEST(UndetectableOptimized) {
+ i::FLAG_allow_natives_syntax = true;
+ v8::HandleScope scope;
+ LocalContext env;
+
+ Local<String> obj = String::NewUndetectable("foo");
+ env->Global()->Set(v8_str("undetectable"), obj);
+ env->Global()->Set(v8_str("detectable"), v8_str("bar"));
+
+ ExpectString(
+ "function testBranch() {"
+ " if (!%_IsUndetectableObject(undetectable)) throw 1;"
+ " if (%_IsUndetectableObject(detectable)) throw 2;"
+ "}\n"
+ "function testBool() {"
+ " var b1 = !%_IsUndetectableObject(undetectable);"
+ " var b2 = %_IsUndetectableObject(detectable);"
+ " if (b1) throw 3;"
+ " if (b2) throw 4;"
+ " return b1 == b2;"
+ "}\n"
+ "%OptimizeFunctionOnNextCall(testBranch);"
+ "%OptimizeFunctionOnNextCall(testBool);"
+ "for (var i = 0; i < 10; i++) {"
+ " testBranch();"
+ " testBool();"
+ "}\n"
+ "\"PASS\"",
+ "PASS");
+}
+
+
template <typename T> static void USE(T) { }
diff --git a/test/cctest/test-assembler-ia32.cc b/test/cctest/test-assembler-ia32.cc
index 576739b..e9d799b 100644
--- a/test/cctest/test-assembler-ia32.cc
+++ b/test/cctest/test-assembler-ia32.cc
@@ -102,7 +102,7 @@
__ bind(&C);
__ test(edx, Operand(edx));
- __ j(not_zero, &L, taken);
+ __ j(not_zero, &L);
__ ret(0);
CodeDesc desc;
@@ -140,7 +140,7 @@
__ bind(&C);
__ test(edx, Operand(edx));
- __ j(not_zero, &L, taken);
+ __ j(not_zero, &L);
__ ret(0);
// some relocated stuff here, not executed
@@ -351,10 +351,10 @@
__ fld_d(Operand(esp, 3 * kPointerSize));
__ fld_d(Operand(esp, 1 * kPointerSize));
__ FCmp();
- __ j(parity_even, &nan_l, taken);
- __ j(equal, &equal_l, taken);
- __ j(below, &less_l, taken);
- __ j(above, &greater_l, taken);
+ __ j(parity_even, &nan_l);
+ __ j(equal, &equal_l);
+ __ j(below, &less_l);
+ __ j(above, &greater_l);
__ mov(eax, kUndefined);
__ ret(0);
diff --git a/test/cctest/test-compiler.cc b/test/cctest/test-compiler.cc
index d3dd9c6..4c5f197 100644
--- a/test/cctest/test-compiler.cc
+++ b/test/cctest/test-compiler.cc
@@ -31,6 +31,8 @@
#include "v8.h"
#include "compiler.h"
+#include "disasm.h"
+#include "disassembler.h"
#include "execution.h"
#include "factory.h"
#include "platform.h"
@@ -348,3 +350,51 @@
CHECK_EQ(i, f->GetScriptLineNumber());
}
}
+
+
+#ifdef ENABLE_DISASSEMBLER
+static Handle<JSFunction> GetJSFunction(v8::Handle<v8::Object> obj,
+ const char* property_name) {
+ v8::Local<v8::Function> fun =
+ v8::Local<v8::Function>::Cast(obj->Get(v8_str(property_name)));
+ return v8::Utils::OpenHandle(*fun);
+}
+
+
+static void CheckCodeForUnsafeLiteral(Handle<JSFunction> f) {
+ // Create a disassembler with default name lookup.
+ disasm::NameConverter name_converter;
+ disasm::Disassembler d(name_converter);
+
+ if (f->code()->kind() == Code::FUNCTION) {
+ Address pc = f->code()->instruction_start();
+ int decode_size =
+ Min(f->code()->instruction_size(),
+ static_cast<int>(f->code()->stack_check_table_offset()));
+ Address end = pc + decode_size;
+
+ v8::internal::EmbeddedVector<char, 128> decode_buffer;
+ while (pc < end) {
+ pc += d.InstructionDecode(decode_buffer, pc);
+ CHECK(strstr(decode_buffer.start(), "mov eax,0x178c29c") == NULL);
+ CHECK(strstr(decode_buffer.start(), "push 0x178c29c") == NULL);
+ CHECK(strstr(decode_buffer.start(), "0x178c29c") == NULL);
+ }
+ }
+}
+
+
+TEST(SplitConstantsInFullCompiler) {
+ v8::HandleScope scope;
+ LocalContext env;
+
+ CompileRun("function f() { a = 12345678 }; f();");
+ CheckCodeForUnsafeLiteral(GetJSFunction(env->Global(), "f"));
+ CompileRun("function f(x) { a = 12345678 + x}; f(1);");
+ CheckCodeForUnsafeLiteral(GetJSFunction(env->Global(), "f"));
+ CompileRun("function f(x) { var arguments = 1; x += 12345678}; f(1);");
+ CheckCodeForUnsafeLiteral(GetJSFunction(env->Global(), "f"));
+ CompileRun("function f(x) { var arguments = 1; x = 12345678}; f(1);");
+ CheckCodeForUnsafeLiteral(GetJSFunction(env->Global(), "f"));
+}
+#endif
diff --git a/test/cctest/test-deoptimization.cc b/test/cctest/test-deoptimization.cc
index 5ab84f9..056c981 100644
--- a/test/cctest/test-deoptimization.cc
+++ b/test/cctest/test-deoptimization.cc
@@ -97,8 +97,8 @@
};
-Handle<JSFunction> GetJSFunction(v8::Handle<v8::Object> obj,
- const char* property_name) {
+static Handle<JSFunction> GetJSFunction(v8::Handle<v8::Object> obj,
+ const char* property_name) {
v8::Local<v8::Function> fun =
v8::Local<v8::Function>::Cast(obj->Get(v8_str(property_name)));
return v8::Utils::OpenHandle(*fun);
diff --git a/test/cctest/test-disasm-arm.cc b/test/cctest/test-disasm-arm.cc
index 65a2cf3..032e6bc 100644
--- a/test/cctest/test-disasm-arm.cc
+++ b/test/cctest/test-disasm-arm.cc
@@ -436,14 +436,14 @@
"ee0faa90 vmov s31, r10");
COMPARE(vabs(d0, d1),
- "eeb00bc1 vabs d0, d1");
+ "eeb00bc1 vabs.f64 d0, d1");
COMPARE(vabs(d3, d4, mi),
- "4eb03bc4 vabsmi d3, d4");
+ "4eb03bc4 vabs.f64mi d3, d4");
COMPARE(vneg(d0, d1),
- "eeb10b41 vneg d0, d1");
+ "eeb10b41 vneg.f64 d0, d1");
COMPARE(vneg(d3, d4, mi),
- "4eb13b44 vnegmi d3, d4");
+ "4eb13b44 vneg.f64mi d3, d4");
COMPARE(vadd(d0, d1, d2),
"ee310b02 vadd.f64 d0, d1, d2");
@@ -543,3 +543,206 @@
VERIFY_RUN();
}
+
+
+TEST(LoadStore) {
+ SETUP();
+
+ COMPARE(ldrb(r0, MemOperand(r1)),
+ "e5d10000 ldrb r0, [r1, #+0]");
+ COMPARE(ldrb(r2, MemOperand(r3, 42)),
+ "e5d3202a ldrb r2, [r3, #+42]");
+ COMPARE(ldrb(r4, MemOperand(r5, -42)),
+ "e555402a ldrb r4, [r5, #-42]");
+ COMPARE(ldrb(r6, MemOperand(r7, 42, PostIndex)),
+ "e4d7602a ldrb r6, [r7], #+42");
+ COMPARE(ldrb(r8, MemOperand(r9, -42, PostIndex)),
+ "e459802a ldrb r8, [r9], #-42");
+ COMPARE(ldrb(r10, MemOperand(fp, 42, PreIndex)),
+ "e5fba02a ldrb r10, [fp, #+42]!");
+ COMPARE(ldrb(ip, MemOperand(sp, -42, PreIndex)),
+ "e57dc02a ldrb ip, [sp, #-42]!");
+ COMPARE(ldrb(r0, MemOperand(r1, r2)),
+ "e7d10002 ldrb r0, [r1, +r2]");
+ COMPARE(ldrb(r0, MemOperand(r1, r2, NegOffset)),
+ "e7510002 ldrb r0, [r1, -r2]");
+ COMPARE(ldrb(r0, MemOperand(r1, r2, PostIndex)),
+ "e6d10002 ldrb r0, [r1], +r2");
+ COMPARE(ldrb(r0, MemOperand(r1, r2, NegPostIndex)),
+ "e6510002 ldrb r0, [r1], -r2");
+ COMPARE(ldrb(r0, MemOperand(r1, r2, PreIndex)),
+ "e7f10002 ldrb r0, [r1, +r2]!");
+ COMPARE(ldrb(r0, MemOperand(r1, r2, NegPreIndex)),
+ "e7710002 ldrb r0, [r1, -r2]!");
+
+ COMPARE(strb(r0, MemOperand(r1)),
+ "e5c10000 strb r0, [r1, #+0]");
+ COMPARE(strb(r2, MemOperand(r3, 42)),
+ "e5c3202a strb r2, [r3, #+42]");
+ COMPARE(strb(r4, MemOperand(r5, -42)),
+ "e545402a strb r4, [r5, #-42]");
+ COMPARE(strb(r6, MemOperand(r7, 42, PostIndex)),
+ "e4c7602a strb r6, [r7], #+42");
+ COMPARE(strb(r8, MemOperand(r9, -42, PostIndex)),
+ "e449802a strb r8, [r9], #-42");
+ COMPARE(strb(r10, MemOperand(fp, 42, PreIndex)),
+ "e5eba02a strb r10, [fp, #+42]!");
+ COMPARE(strb(ip, MemOperand(sp, -42, PreIndex)),
+ "e56dc02a strb ip, [sp, #-42]!");
+ COMPARE(strb(r0, MemOperand(r1, r2)),
+ "e7c10002 strb r0, [r1, +r2]");
+ COMPARE(strb(r0, MemOperand(r1, r2, NegOffset)),
+ "e7410002 strb r0, [r1, -r2]");
+ COMPARE(strb(r0, MemOperand(r1, r2, PostIndex)),
+ "e6c10002 strb r0, [r1], +r2");
+ COMPARE(strb(r0, MemOperand(r1, r2, NegPostIndex)),
+ "e6410002 strb r0, [r1], -r2");
+ COMPARE(strb(r0, MemOperand(r1, r2, PreIndex)),
+ "e7e10002 strb r0, [r1, +r2]!");
+ COMPARE(strb(r0, MemOperand(r1, r2, NegPreIndex)),
+ "e7610002 strb r0, [r1, -r2]!");
+
+ COMPARE(ldrh(r0, MemOperand(r1)),
+ "e1d100b0 ldrh r0, [r1, #+0]");
+ COMPARE(ldrh(r2, MemOperand(r3, 42)),
+ "e1d322ba ldrh r2, [r3, #+42]");
+ COMPARE(ldrh(r4, MemOperand(r5, -42)),
+ "e15542ba ldrh r4, [r5, #-42]");
+ COMPARE(ldrh(r6, MemOperand(r7, 42, PostIndex)),
+ "e0d762ba ldrh r6, [r7], #+42");
+ COMPARE(ldrh(r8, MemOperand(r9, -42, PostIndex)),
+ "e05982ba ldrh r8, [r9], #-42");
+ COMPARE(ldrh(r10, MemOperand(fp, 42, PreIndex)),
+ "e1fba2ba ldrh r10, [fp, #+42]!");
+ COMPARE(ldrh(ip, MemOperand(sp, -42, PreIndex)),
+ "e17dc2ba ldrh ip, [sp, #-42]!");
+ COMPARE(ldrh(r0, MemOperand(r1, r2)),
+ "e19100b2 ldrh r0, [r1, +r2]");
+ COMPARE(ldrh(r0, MemOperand(r1, r2, NegOffset)),
+ "e11100b2 ldrh r0, [r1, -r2]");
+ COMPARE(ldrh(r0, MemOperand(r1, r2, PostIndex)),
+ "e09100b2 ldrh r0, [r1], +r2");
+ COMPARE(ldrh(r0, MemOperand(r1, r2, NegPostIndex)),
+ "e01100b2 ldrh r0, [r1], -r2");
+ COMPARE(ldrh(r0, MemOperand(r1, r2, PreIndex)),
+ "e1b100b2 ldrh r0, [r1, +r2]!");
+ COMPARE(ldrh(r0, MemOperand(r1, r2, NegPreIndex)),
+ "e13100b2 ldrh r0, [r1, -r2]!");
+
+ COMPARE(strh(r0, MemOperand(r1)),
+ "e1c100b0 strh r0, [r1, #+0]");
+ COMPARE(strh(r2, MemOperand(r3, 42)),
+ "e1c322ba strh r2, [r3, #+42]");
+ COMPARE(strh(r4, MemOperand(r5, -42)),
+ "e14542ba strh r4, [r5, #-42]");
+ COMPARE(strh(r6, MemOperand(r7, 42, PostIndex)),
+ "e0c762ba strh r6, [r7], #+42");
+ COMPARE(strh(r8, MemOperand(r9, -42, PostIndex)),
+ "e04982ba strh r8, [r9], #-42");
+ COMPARE(strh(r10, MemOperand(fp, 42, PreIndex)),
+ "e1eba2ba strh r10, [fp, #+42]!");
+ COMPARE(strh(ip, MemOperand(sp, -42, PreIndex)),
+ "e16dc2ba strh ip, [sp, #-42]!");
+ COMPARE(strh(r0, MemOperand(r1, r2)),
+ "e18100b2 strh r0, [r1, +r2]");
+ COMPARE(strh(r0, MemOperand(r1, r2, NegOffset)),
+ "e10100b2 strh r0, [r1, -r2]");
+ COMPARE(strh(r0, MemOperand(r1, r2, PostIndex)),
+ "e08100b2 strh r0, [r1], +r2");
+ COMPARE(strh(r0, MemOperand(r1, r2, NegPostIndex)),
+ "e00100b2 strh r0, [r1], -r2");
+ COMPARE(strh(r0, MemOperand(r1, r2, PreIndex)),
+ "e1a100b2 strh r0, [r1, +r2]!");
+ COMPARE(strh(r0, MemOperand(r1, r2, NegPreIndex)),
+ "e12100b2 strh r0, [r1, -r2]!");
+
+ COMPARE(ldr(r0, MemOperand(r1)),
+ "e5910000 ldr r0, [r1, #+0]");
+ COMPARE(ldr(r2, MemOperand(r3, 42)),
+ "e593202a ldr r2, [r3, #+42]");
+ COMPARE(ldr(r4, MemOperand(r5, -42)),
+ "e515402a ldr r4, [r5, #-42]");
+ COMPARE(ldr(r6, MemOperand(r7, 42, PostIndex)),
+ "e497602a ldr r6, [r7], #+42");
+ COMPARE(ldr(r8, MemOperand(r9, -42, PostIndex)),
+ "e419802a ldr r8, [r9], #-42");
+ COMPARE(ldr(r10, MemOperand(fp, 42, PreIndex)),
+ "e5bba02a ldr r10, [fp, #+42]!");
+ COMPARE(ldr(ip, MemOperand(sp, -42, PreIndex)),
+ "e53dc02a ldr ip, [sp, #-42]!");
+ COMPARE(ldr(r0, MemOperand(r1, r2)),
+ "e7910002 ldr r0, [r1, +r2]");
+ COMPARE(ldr(r0, MemOperand(r1, r2, NegOffset)),
+ "e7110002 ldr r0, [r1, -r2]");
+ COMPARE(ldr(r0, MemOperand(r1, r2, PostIndex)),
+ "e6910002 ldr r0, [r1], +r2");
+ COMPARE(ldr(r0, MemOperand(r1, r2, NegPostIndex)),
+ "e6110002 ldr r0, [r1], -r2");
+ COMPARE(ldr(r0, MemOperand(r1, r2, PreIndex)),
+ "e7b10002 ldr r0, [r1, +r2]!");
+ COMPARE(ldr(r0, MemOperand(r1, r2, NegPreIndex)),
+ "e7310002 ldr r0, [r1, -r2]!");
+
+ COMPARE(str(r0, MemOperand(r1)),
+ "e5810000 str r0, [r1, #+0]");
+ COMPARE(str(r2, MemOperand(r3, 42)),
+ "e583202a str r2, [r3, #+42]");
+ COMPARE(str(r4, MemOperand(r5, -42)),
+ "e505402a str r4, [r5, #-42]");
+ COMPARE(str(r6, MemOperand(r7, 42, PostIndex)),
+ "e487602a str r6, [r7], #+42");
+ COMPARE(str(r8, MemOperand(r9, -42, PostIndex)),
+ "e409802a str r8, [r9], #-42");
+ COMPARE(str(r10, MemOperand(fp, 42, PreIndex)),
+ "e5aba02a str r10, [fp, #+42]!");
+ COMPARE(str(ip, MemOperand(sp, -42, PreIndex)),
+ "e52dc02a str ip, [sp, #-42]!");
+ COMPARE(str(r0, MemOperand(r1, r2)),
+ "e7810002 str r0, [r1, +r2]");
+ COMPARE(str(r0, MemOperand(r1, r2, NegOffset)),
+ "e7010002 str r0, [r1, -r2]");
+ COMPARE(str(r0, MemOperand(r1, r2, PostIndex)),
+ "e6810002 str r0, [r1], +r2");
+ COMPARE(str(r0, MemOperand(r1, r2, NegPostIndex)),
+ "e6010002 str r0, [r1], -r2");
+ COMPARE(str(r0, MemOperand(r1, r2, PreIndex)),
+ "e7a10002 str r0, [r1, +r2]!");
+ COMPARE(str(r0, MemOperand(r1, r2, NegPreIndex)),
+ "e7210002 str r0, [r1, -r2]!");
+
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ CpuFeatures::Scope scope(ARMv7);
+ COMPARE(ldrd(r0, r1, MemOperand(r1)),
+ "e1c100d0 ldrd r0, [r1, #+0]");
+ COMPARE(ldrd(r2, r3, MemOperand(r3, 127)),
+ "e1c327df ldrd r2, [r3, #+127]");
+ COMPARE(ldrd(r4, r5, MemOperand(r5, -127)),
+ "e14547df ldrd r4, [r5, #-127]");
+ COMPARE(ldrd(r6, r7, MemOperand(r7, 127, PostIndex)),
+ "e0c767df ldrd r6, [r7], #+127");
+ COMPARE(ldrd(r8, r9, MemOperand(r9, -127, PostIndex)),
+ "e04987df ldrd r8, [r9], #-127");
+ COMPARE(ldrd(r10, fp, MemOperand(fp, 127, PreIndex)),
+ "e1eba7df ldrd r10, [fp, #+127]!");
+ COMPARE(ldrd(ip, sp, MemOperand(sp, -127, PreIndex)),
+ "e16dc7df ldrd ip, [sp, #-127]!");
+
+ COMPARE(strd(r0, r1, MemOperand(r1)),
+ "e1c100f0 strd r0, [r1, #+0]");
+ COMPARE(strd(r2, r3, MemOperand(r3, 127)),
+ "e1c327ff strd r2, [r3, #+127]");
+ COMPARE(strd(r4, r5, MemOperand(r5, -127)),
+ "e14547ff strd r4, [r5, #-127]");
+ COMPARE(strd(r6, r7, MemOperand(r7, 127, PostIndex)),
+ "e0c767ff strd r6, [r7], #+127");
+ COMPARE(strd(r8, r9, MemOperand(r9, -127, PostIndex)),
+ "e04987ff strd r8, [r9], #-127");
+ COMPARE(strd(r10, fp, MemOperand(fp, 127, PreIndex)),
+ "e1eba7ff strd r10, [fp, #+127]!");
+ COMPARE(strd(ip, sp, MemOperand(sp, -127, PreIndex)),
+ "e16dc7ff strd ip, [sp, #-127]!");
+ }
+
+ VERIFY_RUN();
+}
+
diff --git a/test/cctest/test-disasm-ia32.cc b/test/cctest/test-disasm-ia32.cc
index cb735c7..9f7d0bb 100644
--- a/test/cctest/test-disasm-ia32.cc
+++ b/test/cctest/test-disasm-ia32.cc
@@ -330,11 +330,6 @@
__ j(less_equal, &Ljcc);
__ j(greater, &Ljcc);
- // checking hints
- __ j(zero, &Ljcc, taken);
- __ j(zero, &Ljcc, not_taken);
-
- // __ mov(Operand::StaticVariable(Isolate::handler_address()), eax);
// 0xD9 instructions
__ nop();
diff --git a/test/cctest/test-lockers.cc b/test/cctest/test-lockers.cc
index ba0fdb2..5b33f2e 100644
--- a/test/cctest/test-lockers.cc
+++ b/test/cctest/test-lockers.cc
@@ -240,7 +240,11 @@
// Run many threads each accessing its own isolate without locking
TEST(MultithreadedParallelIsolates) {
+#ifdef V8_TARGET_ARCH_ARM
+ const int kNThreads = 10;
+#else
const int kNThreads = 50;
+#endif
i::List<JoinableThread*> threads(kNThreads);
for (int i = 0; i < kNThreads; i++) {
threads.Add(new IsolateNonlockingThread());
diff --git a/test/cctest/test-type-info.cc b/test/cctest/test-type-info.cc
deleted file mode 100644
index 59dd83d..0000000
--- a/test/cctest/test-type-info.cc
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include "cctest.h"
-#include "type-info.h"
-
-namespace v8 {
-namespace internal {
-
-TEST(ThreeBitRepresentation) {
- // Numeric types and unknown should fit into the short
- // representation.
- CHECK(TypeInfo::ExpandedRepresentation(
- TypeInfo::Unknown().ThreeBitRepresentation()).IsUnknown());
- CHECK(TypeInfo::ExpandedRepresentation(
- TypeInfo::Number().ThreeBitRepresentation()).IsNumber());
- CHECK(TypeInfo::ExpandedRepresentation(
- TypeInfo::Integer32().ThreeBitRepresentation()).IsInteger32());
- CHECK(TypeInfo::ExpandedRepresentation(
- TypeInfo::Smi().ThreeBitRepresentation()).IsSmi());
- CHECK(TypeInfo::ExpandedRepresentation(
- TypeInfo::Double().ThreeBitRepresentation()).IsDouble());
-
- // Other types should map to unknown.
- CHECK(TypeInfo::ExpandedRepresentation(
- TypeInfo::Primitive().ThreeBitRepresentation()).IsUnknown());
- CHECK(TypeInfo::ExpandedRepresentation(
- TypeInfo::String().ThreeBitRepresentation()).IsUnknown());
-}
-
-} } // namespace v8::internal
diff --git a/src/frame-element.cc b/test/mjsunit/limit-locals.js
similarity index 68%
rename from src/frame-element.cc
rename to test/mjsunit/limit-locals.js
index f629900..ad9ec43 100644
--- a/src/frame-element.cc
+++ b/test/mjsunit/limit-locals.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,13 +25,22 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "v8.h"
+// Test that there is a limit of 32767 locals.
-#include "frame-element.h"
-#include "zone-inl.h"
+function function_with_n_locals(n) {
+ test_prefix = "prefix ";
+ test_suffix = " suffix";
+ var src = "test_prefix + (function () {"
+ for (var i = 1; i <= n; i++) {
+ src += "var x" + i + ";";
+ }
+ src += "return " + n + ";})() + test_suffix";
+ return eval(src);
+}
-namespace v8 {
-namespace internal {
+assertEquals("prefix 0 suffix", function_with_n_locals(0));
+assertEquals("prefix 16000 suffix", function_with_n_locals(16000));
+assertEquals("prefix 32767 suffix", function_with_n_locals(32767));
-
-} } // namespace v8::internal
+assertThrows("function_with_n_locals(32768)");
+assertThrows("function_with_n_locals(100000)");
diff --git a/tools/gcmole/gccause.lua b/tools/gcmole/gccause.lua
new file mode 100644
index 0000000..a6fe542
--- /dev/null
+++ b/tools/gcmole/gccause.lua
@@ -0,0 +1,60 @@
+-- Copyright 2011 the V8 project authors. All rights reserved.
+-- Redistribution and use in source and binary forms, with or without
+-- modification, are permitted provided that the following conditions are
+-- met:
+--
+-- * Redistributions of source code must retain the above copyright
+-- notice, this list of conditions and the following disclaimer.
+-- * Redistributions in binary form must reproduce the above
+-- copyright notice, this list of conditions and the following
+-- disclaimer in the documentation and/or other materials provided
+-- with the distribution.
+-- * Neither the name of Google Inc. nor the names of its
+-- contributors may be used to endorse or promote products derived
+-- from this software without specific prior written permission.
+--
+-- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+-- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+-- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+-- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+-- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+-- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+-- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+-- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+-- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+-- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+-- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+-- This is an auxiliary tool that reads the gccauses file generated by
+-- gcmole.lua and prints a tree of the calls that can potentially cause a
+-- GC inside a given function.
+--
+-- Usage: lua tools/gcmole/gccause.lua <function-name-pattern>
+--
+
+assert(loadfile "gccauses")()
+
+local P = ...
+
+local T = {}
+
+local function TrackCause(name, lvl)
+ io.write((" "):rep(lvl or 0), name, "\n")
+ if GC[name] then
+ local causes = GC[name]
+ for i = 1, #causes do
+ local f = causes[i]
+ if not T[f] then
+ T[f] = true
+ TrackCause(f, (lvl or 0) + 1)
+ end
+ end
+ end
+end
+
+for name, _ in pairs(GC) do
+ if name:match(P) then
+ T = {}
+ TrackCause(name)
+ end
+end
diff --git a/tools/gcmole/gcmole.cc b/tools/gcmole/gcmole.cc
index ad64c1d..71ba24a 100644
--- a/tools/gcmole/gcmole.cc
+++ b/tools/gcmole/gcmole.cc
@@ -69,6 +69,47 @@
}
+struct Resolver {
+ explicit Resolver(clang::ASTContext& ctx)
+ : ctx_(ctx), decl_ctx_(ctx.getTranslationUnitDecl()) {
+ }
+
+ Resolver(clang::ASTContext& ctx, clang::DeclContext* decl_ctx)
+ : ctx_(ctx), decl_ctx_(decl_ctx) {
+ }
+
+ clang::DeclarationName ResolveName(const char* n) {
+ clang::IdentifierInfo* ident = &ctx_.Idents.get(n);
+ return ctx_.DeclarationNames.getIdentifier(ident);
+ }
+
+ Resolver ResolveNamespace(const char* n) {
+ return Resolver(ctx_, Resolve<clang::NamespaceDecl>(n));
+ }
+
+ template<typename T>
+ T* Resolve(const char* n) {
+ if (decl_ctx_ == NULL) return NULL;
+
+ clang::DeclContext::lookup_result result =
+ decl_ctx_->lookup(ResolveName(n));
+
+ clang::DeclContext::lookup_iterator end = result.second;
+ for (clang::DeclContext::lookup_iterator i = result.first;
+ i != end;
+ i++) {
+ if (isa<T>(*i)) return cast<T>(*i);
+ }
+
+ return NULL;
+ }
+
+ private:
+ clang::ASTContext& ctx_;
+ clang::DeclContext* decl_ctx_;
+};
+
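+// A minimal usage sketch (hedged; it assumes the translation unit being
+// analyzed actually declares v8::internal::Object):
+//
+//   Resolver r(ctx);
+//   clang::CXXRecordDecl* object_decl =
+//       r.ResolveNamespace("v8").ResolveNamespace("internal")
+//           .Resolve<clang::CXXRecordDecl>("Object");
+//
+// Resolve<T>() scans the lookup result for the first declaration of the
+// requested kind and returns NULL when the name is absent, so callers must
+// be prepared for missing declarations.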
+
class CalleesPrinter : public clang::RecursiveASTVisitor<CalleesPrinter> {
public:
explicit CalleesPrinter(clang::MangleContext* ctx) : ctx_(ctx) {
@@ -140,12 +181,14 @@
Callgraph callgraph_;
};
+
class FunctionDeclarationFinder
: public clang::ASTConsumer,
public clang::RecursiveASTVisitor<FunctionDeclarationFinder> {
public:
explicit FunctionDeclarationFinder(clang::Diagnostic& d,
- clang::SourceManager& sm)
+ clang::SourceManager& sm,
+ const std::vector<std::string>& args)
: d_(d), sm_(sm) { }
virtual void HandleTranslationUnit(clang::ASTContext &ctx) {
@@ -202,100 +245,807 @@
}
-static bool IsHandleType(const clang::DeclarationName& handleDeclName,
- const clang::QualType& qtype) {
- const clang::Type* canonical_type =
- qtype.getTypePtr()->getCanonicalTypeUnqualified().getTypePtr();
+static const int kNoEffect = 0;
+static const int kCausesGC = 1;
+static const int kRawDef = 2;
+static const int kRawUse = 4;
+static const int kAllEffects = kCausesGC | kRawDef | kRawUse;
- if (const clang::TemplateSpecializationType* type =
- canonical_type->getAs<clang::TemplateSpecializationType>()) {
- if (clang::TemplateDecl* decl =
- type->getTemplateName().getAsTemplateDecl()) {
- if (decl->getTemplatedDecl()->getDeclName() == handleDeclName) {
- return true;
+class Environment;
+
+class ExprEffect {
+ public:
+ bool hasGC() { return (effect_ & kCausesGC) != 0; }
+ void setGC() { effect_ |= kCausesGC; }
+
+ bool hasRawDef() { return (effect_ & kRawDef) != 0; }
+ void setRawDef() { effect_ |= kRawDef; }
+
+ bool hasRawUse() { return (effect_ & kRawUse) != 0; }
+ void setRawUse() { effect_ |= kRawUse; }
+
+ static ExprEffect None() { return ExprEffect(kNoEffect, NULL); }
+ static ExprEffect NoneWithEnv(Environment* env) {
+ return ExprEffect(kNoEffect, env);
+ }
+ static ExprEffect RawUse() { return ExprEffect(kRawUse, NULL); }
+
+ static ExprEffect Merge(ExprEffect a, ExprEffect b);
+ static ExprEffect MergeSeq(ExprEffect a, ExprEffect b);
+ ExprEffect Define(const std::string& name);
+
+ Environment* env() {
+ return reinterpret_cast<Environment*>(effect_ & ~kAllEffects);
+ }
+
+ private:
+ ExprEffect(int effect, Environment* env)
+ : effect_((effect & kAllEffects) |
+ reinterpret_cast<intptr_t>(env)) { }
+
+ intptr_t effect_;
+};
+
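+// ExprEffect is a tagged pointer: the three effect bits are stored in the
+// low bits of an Environment*, which are zero because Environment objects
+// are heap allocated and operator new returns storage aligned to at least
+// eight bytes on the platforms this tool targets. A sketch of the round
+// trip under that alignment assumption:
+//
+//   Environment* env = Environment::Allocate(Environment());
+//   intptr_t packed = reinterpret_cast<intptr_t>(env) | kCausesGC;
+//   Environment* back =
+//       reinterpret_cast<Environment*>(packed & ~kAllEffects);
+//   bool gc = (packed & kCausesGC) != 0;  // true, and back == env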
+
+const std::string BAD_EXPR_MSG("Possible problem with evaluation order.");
+const std::string DEAD_VAR_MSG("Possibly dead variable.");
+
+
+class Environment {
+ public:
+ Environment() { }
+
+ static Environment Unreachable() {
+ Environment env;
+ env.live_.set();
+ return env;
+ }
+
+ static Environment Merge(const Environment& l,
+ const Environment& r) {
+ return Environment(l, r);
+ }
+
+ Environment ApplyEffect(ExprEffect effect) const {
+ Environment out = effect.hasGC() ? Environment() : Environment(*this);
+ if (effect.env() != NULL) out.live_ |= effect.env()->live_;
+ return out;
+ }
+
+ typedef std::map<std::string, int> SymbolTable;
+
+ bool IsAlive(const std::string& name) const {
+ SymbolTable::iterator code = symbol_table_.find(name);
+ if (code == symbol_table_.end()) return false;
+ return live_[code->second];
+ }
+
+ bool Equal(const Environment& env) {
+ return live_ == env.live_;
+ }
+
+ Environment Define(const std::string& name) const {
+ return Environment(*this, SymbolToCode(name));
+ }
+
+ void MDefine(const std::string& name) {
+ live_.set(SymbolToCode(name));
+ }
+
+ static int SymbolToCode(const std::string& name) {
+ SymbolTable::iterator code = symbol_table_.find(name);
+
+ if (code == symbol_table_.end()) {
+ int new_code = symbol_table_.size();
+ symbol_table_.insert(std::make_pair(name, new_code));
+ return new_code;
+ }
+
+ return code->second;
+ }
+
+ static void ClearSymbolTable() {
+ std::vector<Environment*>::iterator end = envs_.end();
+ for (std::vector<Environment*>::iterator i = envs_.begin();
+ i != end;
+ ++i) {
+ delete *i;
+ }
+ envs_.clear();
+ symbol_table_.clear();
+ }
+
+ void Print() const {
+ bool comma = false;
+ std::cout << "{";
+ SymbolTable::iterator end = symbol_table_.end();
+ for (SymbolTable::iterator i = symbol_table_.begin();
+ i != end;
+ ++i) {
+ if (live_[i->second]) {
+ if (comma) std::cout << ", ";
+ std::cout << i->first;
+ comma = true;
}
}
- } else if (const clang::RecordType* type =
- canonical_type->getAs<clang::RecordType>()) {
- if (const clang::ClassTemplateSpecializationDecl* t =
- dyn_cast<clang::ClassTemplateSpecializationDecl>(type->getDecl())) {
- if (t->getSpecializedTemplate()->getDeclName() == handleDeclName) {
- return true;
- }
+ std::cout << "}";
+ }
+
+ static Environment* Allocate(const Environment& env) {
+ Environment* allocated_env = new Environment(env);
+ envs_.push_back(allocated_env);
+ return allocated_env;
+ }
+
+ private:
+ Environment(const Environment& l, const Environment& r)
+ : live_(l.live_ & r.live_) {
+ }
+
+ Environment(const Environment& l, int code)
+ : live_(l.live_) {
+ live_.set(code);
+ }
+
+ static SymbolTable symbol_table_;
+ static std::vector<Environment* > envs_;
+
+ static const int kMaxNumberOfLocals = 256;
+ std::bitset<kMaxNumberOfLocals> live_;
+
+ friend class ExprEffect;
+ friend class CallProps;
+};
+
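+// Environment is the dataflow fact tracked per program point: a bitset of
+// raw-pointer locals known to still be valid. ApplyEffect() starts from an
+// empty set when the effect may have caused a GC (any raw pointer could now
+// be stale) and then re-adds whatever the effect defined, while Merge()
+// intersects liveness so a variable survives a control-flow join only if it
+// is live on every incoming path. A hedged sketch, with a hypothetical
+// local named raw_obj:
+//
+//   Environment env;
+//   env.MDefine("raw_obj");              // raw_obj = ...; now live
+//   ExprEffect call = ExprEffect::None();
+//   call.setGC();                        // model a call that may collect
+//   env = env.ApplyEffect(call);         // liveness wiped
+//   env.IsAlive("raw_obj");              // false: next use is reported
+//                                        // with DEAD_VAR_MSG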
+
+class CallProps {
+ public:
+ CallProps() : env_(NULL) { }
+
+ void SetEffect(int arg, ExprEffect in) {
+ if (in.hasGC()) gc_.set(arg);
+ if (in.hasRawDef()) raw_def_.set(arg);
+ if (in.hasRawUse()) raw_use_.set(arg);
+ if (in.env() != NULL) {
+ if (env_ == NULL) env_ = in.env();
+ env_->live_ |= in.env()->live_;
}
}
- return false;
+ ExprEffect ComputeCumulativeEffect(bool result_is_raw) {
+ ExprEffect out = ExprEffect::NoneWithEnv(env_);
+ if (gc_.any()) out.setGC();
+ if (raw_use_.any()) out.setRawUse();
+ if (result_is_raw) out.setRawDef();
+ return out;
+ }
+
+ bool IsSafe() {
+ if (!gc_.any()) return true;
+ std::bitset<kMaxNumberOfArguments> raw = (raw_def_ | raw_use_);
+ if (!raw.any()) return true;
+ return gc_.count() == 1 && !((raw ^ gc_).any());
+ }
+
+ private:
+ static const int kMaxNumberOfArguments = 64;
+ std::bitset<kMaxNumberOfArguments> raw_def_;
+ std::bitset<kMaxNumberOfArguments> raw_use_;
+ std::bitset<kMaxNumberOfArguments> gc_;
+ Environment* env_;
+};
+
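+// CallProps records the per-argument effects of a call so IsSafe() can
+// check the evaluation-order hazard: C++ leaves the order of argument
+// evaluation unspecified, so if one argument can trigger a GC while a
+// different argument defines or uses a raw Object*, the raw pointer may be
+// stale by the time the call happens. The check accepts a call when no
+// argument GCs, no argument touches raw pointers, or the single GC-ing
+// argument is also the only raw-pointer position. For example:
+//
+//   std::bitset<64> gc, raw;
+//   gc.set(1);
+//   raw.set(2);  // GC in argument 1, raw pointer in argument 2
+//   bool safe = gc.count() == 1 && !((raw ^ gc).any());  // false: report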
+
+Environment::SymbolTable Environment::symbol_table_;
+std::vector<Environment* > Environment::envs_;
+
+
+ExprEffect ExprEffect::Merge(ExprEffect a, ExprEffect b) {
+ Environment* a_env = a.env();
+ Environment* b_env = b.env();
+ Environment* out = NULL;
+ if (a_env != NULL && b_env != NULL) {
+ out = Environment::Allocate(*a_env);
+ out->live_ &= b_env->live_;
+ }
+ return ExprEffect(a.effect_ | b.effect_, out);
}
-class ExpressionClassifier :
- public clang::RecursiveASTVisitor<ExpressionClassifier> {
+ExprEffect ExprEffect::MergeSeq(ExprEffect a, ExprEffect b) {
+ Environment* a_env = b.hasGC() ? NULL : a.env();
+ Environment* b_env = b.env();
+ Environment* out = (b_env == NULL) ? a_env : b_env;
+ if (a_env != NULL && b_env != NULL) {
+ out = Environment::Allocate(*b_env);
+ out->live_ |= a_env->live_;
+ }
+ return ExprEffect(a.effect_ | b.effect_, out);
+}
+
+
+ExprEffect ExprEffect::Define(const std::string& name) {
+ Environment* e = env();
+ if (e == NULL) {
+ e = Environment::Allocate(Environment());
+ }
+ e->MDefine(name);
+ return ExprEffect(effect_, e);
+}
+
+
+static std::string THIS ("this");
+
+
+class FunctionAnalyzer {
public:
- ExpressionClassifier(clang::DeclarationName handleDeclName,
- clang::MangleContext* ctx,
- clang::CXXRecordDecl* objectDecl)
- : handleDeclName_(handleDeclName),
- ctx_(ctx),
- objectDecl_(objectDecl) {
+ FunctionAnalyzer(clang::MangleContext* ctx,
+ clang::DeclarationName handle_decl_name,
+ clang::CXXRecordDecl* object_decl,
+ clang::CXXRecordDecl* smi_decl,
+ clang::Diagnostic& d,
+ clang::SourceManager& sm,
+ bool dead_vars_analysis)
+ : ctx_(ctx),
+ handle_decl_name_(handle_decl_name),
+ object_decl_(object_decl),
+ smi_decl_(smi_decl),
+ d_(d),
+ sm_(sm),
+ block_(NULL),
+ dead_vars_analysis_(dead_vars_analysis) {
}
- bool IsBadExpression(clang::Expr* expr) {
- has_derefs_ = has_gc_ = false;
- TraverseStmt(expr);
- return has_derefs_ && has_gc_;
+
+ // --------------------------------------------------------------------------
+ // Expressions
+ // --------------------------------------------------------------------------
+
+ ExprEffect VisitExpr(clang::Expr* expr, const Environment& env) {
+#define VISIT(type) do { \
+ clang::type* concrete_expr = dyn_cast_or_null<clang::type>(expr); \
+ if (concrete_expr != NULL) { \
+ return Visit##type (concrete_expr, env); \
+ } \
+ } while(0);
+
+ VISIT(AbstractConditionalOperator);
+ VISIT(AddrLabelExpr);
+ VISIT(ArraySubscriptExpr);
+ VISIT(BinaryOperator);
+ VISIT(BinaryTypeTraitExpr);
+ VISIT(BlockDeclRefExpr);
+ VISIT(BlockExpr);
+ VISIT(CallExpr);
+ VISIT(CastExpr);
+ VISIT(CharacterLiteral);
+ VISIT(ChooseExpr);
+ VISIT(CompoundLiteralExpr);
+ VISIT(CXXBindTemporaryExpr);
+ VISIT(CXXBoolLiteralExpr);
+ VISIT(CXXConstructExpr);
+ VISIT(CXXDefaultArgExpr);
+ VISIT(CXXDeleteExpr);
+ VISIT(CXXDependentScopeMemberExpr);
+ VISIT(CXXNewExpr);
+ VISIT(CXXNoexceptExpr);
+ VISIT(CXXNullPtrLiteralExpr);
+ VISIT(CXXPseudoDestructorExpr);
+ VISIT(CXXScalarValueInitExpr);
+ VISIT(CXXThisExpr);
+ VISIT(CXXThrowExpr);
+ VISIT(CXXTypeidExpr);
+ VISIT(CXXUnresolvedConstructExpr);
+ VISIT(CXXUuidofExpr);
+ VISIT(DeclRefExpr);
+ VISIT(DependentScopeDeclRefExpr);
+ VISIT(DesignatedInitExpr);
+ VISIT(ExprWithCleanups);
+ VISIT(ExtVectorElementExpr);
+ VISIT(FloatingLiteral);
+ VISIT(GNUNullExpr);
+ VISIT(ImaginaryLiteral);
+ VISIT(ImplicitValueInitExpr);
+ VISIT(InitListExpr);
+ VISIT(IntegerLiteral);
+ VISIT(MemberExpr);
+ VISIT(OffsetOfExpr);
+ VISIT(OpaqueValueExpr);
+ VISIT(OverloadExpr);
+ VISIT(PackExpansionExpr);
+ VISIT(ParenExpr);
+ VISIT(ParenListExpr);
+ VISIT(PredefinedExpr);
+ VISIT(ShuffleVectorExpr);
+ VISIT(SizeOfPackExpr);
+ VISIT(StmtExpr);
+ VISIT(StringLiteral);
+ VISIT(SubstNonTypeTemplateParmPackExpr);
+ VISIT(UnaryExprOrTypeTraitExpr);
+ VISIT(UnaryOperator);
+ VISIT(UnaryTypeTraitExpr);
+ VISIT(VAArgExpr);
+#undef VISIT
+
+ return ExprEffect::None();
}
- bool IsBadCallSite(clang::Expr* expr) {
- if (isa<clang::CallExpr>(expr)) {
- clang::CallExpr* call = cast<clang::CallExpr>(expr);
+#define DECL_VISIT_EXPR(type) \
+ ExprEffect Visit##type (clang::type* expr, const Environment& env)
- MarkGCSuspectAsArgument(call);
- MarkHandleDereferenceAsArgument(call);
+#define IGNORE_EXPR(type) \
+ ExprEffect Visit##type (clang::type* expr, const Environment& env) { \
+ return ExprEffect::None(); \
+ }
- return derefs_.any() &&
- ((gc_.count() > 1) || (gc_.any() && (gc_ ^ derefs_).any()));
+ IGNORE_EXPR(AddrLabelExpr);
+ IGNORE_EXPR(BinaryTypeTraitExpr);
+ IGNORE_EXPR(BlockExpr);
+ IGNORE_EXPR(CharacterLiteral);
+ IGNORE_EXPR(ChooseExpr);
+ IGNORE_EXPR(CompoundLiteralExpr);
+ IGNORE_EXPR(CXXBoolLiteralExpr);
+ IGNORE_EXPR(CXXDependentScopeMemberExpr);
+ IGNORE_EXPR(CXXNullPtrLiteralExpr);
+ IGNORE_EXPR(CXXPseudoDestructorExpr);
+ IGNORE_EXPR(CXXScalarValueInitExpr);
+ IGNORE_EXPR(CXXNoexceptExpr);
+ IGNORE_EXPR(CXXTypeidExpr);
+ IGNORE_EXPR(CXXUnresolvedConstructExpr);
+ IGNORE_EXPR(CXXUuidofExpr);
+ IGNORE_EXPR(DependentScopeDeclRefExpr);
+ IGNORE_EXPR(DesignatedInitExpr);
+ IGNORE_EXPR(ExtVectorElementExpr);
+ IGNORE_EXPR(FloatingLiteral);
+ IGNORE_EXPR(ImaginaryLiteral);
+ IGNORE_EXPR(IntegerLiteral);
+ IGNORE_EXPR(OffsetOfExpr);
+ IGNORE_EXPR(ImplicitValueInitExpr);
+ IGNORE_EXPR(PackExpansionExpr);
+ IGNORE_EXPR(PredefinedExpr);
+ IGNORE_EXPR(ShuffleVectorExpr);
+ IGNORE_EXPR(SizeOfPackExpr);
+ IGNORE_EXPR(StmtExpr);
+ IGNORE_EXPR(StringLiteral);
+ IGNORE_EXPR(SubstNonTypeTemplateParmPackExpr);
+ IGNORE_EXPR(UnaryExprOrTypeTraitExpr);
+ IGNORE_EXPR(UnaryTypeTraitExpr);
+ IGNORE_EXPR(VAArgExpr);
+ IGNORE_EXPR(GNUNullExpr);
+ IGNORE_EXPR(OverloadExpr);
+
+ DECL_VISIT_EXPR(CXXThisExpr) {
+ return Use(expr, expr->getType(), THIS, env);
+ }
+
+ DECL_VISIT_EXPR(AbstractConditionalOperator) {
+ Environment after_cond = env.ApplyEffect(VisitExpr(expr->getCond(), env));
+ return ExprEffect::Merge(VisitExpr(expr->getTrueExpr(), after_cond),
+ VisitExpr(expr->getFalseExpr(), after_cond));
+ }
+
+ DECL_VISIT_EXPR(ArraySubscriptExpr) {
+ clang::Expr* exprs[2] = {expr->getBase(), expr->getIdx()};
+ return Par(expr, 2, exprs, env);
+ }
+
+ bool IsRawPointerVar(clang::Expr* expr, std::string* var_name) {
+ if (isa<clang::BlockDeclRefExpr>(expr)) {
+ *var_name = cast<clang::BlockDeclRefExpr>(expr)->getDecl()->
+ getNameAsString();
+ return true;
+ } else if (isa<clang::DeclRefExpr>(expr)) {
+ *var_name = cast<clang::DeclRefExpr>(expr)->getDecl()->getNameAsString();
+ return true;
}
return false;
}
- virtual bool VisitExpr(clang::Expr* expr) {
- has_derefs_ = has_derefs_ || IsRawPointerType(expr);
- return !has_gc_ || !has_derefs_;
+ DECL_VISIT_EXPR(BinaryOperator) {
+ clang::Expr* lhs = expr->getLHS();
+ clang::Expr* rhs = expr->getRHS();
+ clang::Expr* exprs[2] = {lhs, rhs};
+
+ switch (expr->getOpcode()) {
+ case clang::BO_Comma:
+ return Seq(expr, 2, exprs, env);
+
+ case clang::BO_LAnd:
+ case clang::BO_LOr:
+ return ExprEffect::Merge(VisitExpr(lhs, env), VisitExpr(rhs, env));
+
+ case clang::BO_Assign: {
+ std::string var_name;
+ if (IsRawPointerVar(lhs, &var_name)) {
+ return VisitExpr(rhs, env).Define(var_name);
+ }
+ return Par(expr, 2, exprs, env);
+ }
+
+ default:
+ return Par(expr, 2, exprs, env);
+ }
}
- virtual bool VisitCallExpr(clang::CallExpr* expr) {
- has_gc_ = has_gc_ || CanCauseGC(expr);
- return !has_gc_ || !has_derefs_;
+ DECL_VISIT_EXPR(CXXBindTemporaryExpr) {
+ return VisitExpr(expr->getSubExpr(), env);
}
- private:
- void MarkHandleDereferenceAsArgument(clang::CallExpr* call) {
- derefs_.reset();
- if (clang::CXXMemberCallExpr* memcall =
- dyn_cast<clang::CXXMemberCallExpr>(call)) {
- if (ManipulatesRawPointers(memcall->getImplicitObjectArgument())) {
- derefs_.set(0);
+ DECL_VISIT_EXPR(CXXConstructExpr) {
+ return VisitArguments<>(expr, env);
+ }
+
+ DECL_VISIT_EXPR(CXXDefaultArgExpr) {
+ return VisitExpr(expr->getExpr(), env);
+ }
+
+ DECL_VISIT_EXPR(CXXDeleteExpr) {
+ return VisitExpr(expr->getArgument(), env);
+ }
+
+ DECL_VISIT_EXPR(CXXNewExpr) {
+ return Par(expr,
+ expr->getNumConstructorArgs(),
+ expr->getConstructorArgs(),
+ env);
+ }
+
+ DECL_VISIT_EXPR(ExprWithCleanups) {
+ return VisitExpr(expr->getSubExpr(), env);
+ }
+
+ DECL_VISIT_EXPR(CXXThrowExpr) {
+ return VisitExpr(expr->getSubExpr(), env);
+ }
+
+ DECL_VISIT_EXPR(InitListExpr) {
+ return Seq(expr, expr->getNumInits(), expr->getInits(), env);
+ }
+
+ DECL_VISIT_EXPR(MemberExpr) {
+ return VisitExpr(expr->getBase(), env);
+ }
+
+ DECL_VISIT_EXPR(OpaqueValueExpr) {
+ return VisitExpr(expr->getSourceExpr(), env);
+ }
+
+ DECL_VISIT_EXPR(ParenExpr) {
+ return VisitExpr(expr->getSubExpr(), env);
+ }
+
+ DECL_VISIT_EXPR(ParenListExpr) {
+ return Par(expr, expr->getNumExprs(), expr->getExprs(), env);
+ }
+
+ DECL_VISIT_EXPR(UnaryOperator) {
+ // TODO We are treating all expressions that look like &raw_pointer_var
+ // as definitions of raw_pointer_var. This should be changed to
+    // recognize a less generic pattern:
+ //
+ // if (maybe_object->ToObject(&obj)) return maybe_object;
+ //
+ if (expr->getOpcode() == clang::UO_AddrOf) {
+ std::string var_name;
+ if (IsRawPointerVar(expr->getSubExpr(), &var_name)) {
+ return ExprEffect::None().Define(var_name);
}
}
+ return VisitExpr(expr->getSubExpr(), env);
+ }
+ DECL_VISIT_EXPR(CastExpr) {
+ return VisitExpr(expr->getSubExpr(), env);
+ }
+
+ DECL_VISIT_EXPR(DeclRefExpr) {
+ return Use(expr, expr->getDecl(), env);
+ }
+
+ DECL_VISIT_EXPR(BlockDeclRefExpr) {
+ return Use(expr, expr->getDecl(), env);
+ }
+
+ ExprEffect Par(clang::Expr* parent,
+ int n,
+ clang::Expr** exprs,
+ const Environment& env) {
+ CallProps props;
+
+ for (int i = 0; i < n; ++i) {
+ props.SetEffect(i, VisitExpr(exprs[i], env));
+ }
+
+ if (!props.IsSafe()) ReportUnsafe(parent, BAD_EXPR_MSG);
+
+ return props.ComputeCumulativeEffect(IsRawPointerType(parent->getType()));
+ }
+
+ ExprEffect Seq(clang::Stmt* parent,
+ int n,
+ clang::Expr** exprs,
+ const Environment& env) {
+ ExprEffect out = ExprEffect::None();
+ Environment out_env = env;
+ for (int i = 0; i < n; ++i) {
+ out = ExprEffect::MergeSeq(out, VisitExpr(exprs[i], out_env));
+ out_env = out_env.ApplyEffect(out);
+ }
+ return out;
+ }
+
+ ExprEffect Use(const clang::Expr* parent,
+ const clang::QualType& var_type,
+ const std::string& var_name,
+ const Environment& env) {
+ if (IsRawPointerType(var_type)) {
+ if (!env.IsAlive(var_name) && dead_vars_analysis_) {
+ ReportUnsafe(parent, DEAD_VAR_MSG);
+ }
+ return ExprEffect::RawUse();
+ }
+ return ExprEffect::None();
+ }
+
+ ExprEffect Use(const clang::Expr* parent,
+ const clang::ValueDecl* var,
+ const Environment& env) {
+ return Use(parent, var->getType(), var->getNameAsString(), env);
+ }
+
+
+ template<typename ExprType>
+ ExprEffect VisitArguments(ExprType* call, const Environment& env) {
+ CallProps props;
+ VisitArguments<>(call, &props, env);
+ if (!props.IsSafe()) ReportUnsafe(call, BAD_EXPR_MSG);
+ return props.ComputeCumulativeEffect(IsRawPointerType(call->getType()));
+ }
+
+ template<typename ExprType>
+ void VisitArguments(ExprType* call,
+ CallProps* props,
+ const Environment& env) {
for (unsigned arg = 0; arg < call->getNumArgs(); arg++) {
- if (ManipulatesRawPointers(call->getArg(arg))) derefs_.set(arg + 1);
+ props->SetEffect(arg + 1, VisitExpr(call->getArg(arg), env));
}
}
- void MarkGCSuspectAsArgument(clang::CallExpr* call) {
- gc_.reset();
+
+ ExprEffect VisitCallExpr(clang::CallExpr* call,
+ const Environment& env) {
+ CallProps props;
clang::CXXMemberCallExpr* memcall =
dyn_cast_or_null<clang::CXXMemberCallExpr>(call);
- if (memcall != NULL && CanCauseGC(memcall->getImplicitObjectArgument())) {
- gc_.set(0);
+ if (memcall != NULL) {
+ clang::Expr* receiver = memcall->getImplicitObjectArgument();
+ props.SetEffect(0, VisitExpr(receiver, env));
}
- for (unsigned arg = 0; arg < call->getNumArgs(); arg++) {
- if (CanCauseGC(call->getArg(arg))) gc_.set(arg + 1);
+ VisitArguments<>(call, &props, env);
+
+ if (!props.IsSafe()) ReportUnsafe(call, BAD_EXPR_MSG);
+
+ ExprEffect out =
+ props.ComputeCumulativeEffect(IsRawPointerType(call->getType()));
+
+ clang::FunctionDecl* callee = call->getDirectCallee();
+ if ((callee != NULL) && KnownToCauseGC(ctx_, callee)) {
+ out.setGC();
}
+
+ return out;
+ }
+
+ // --------------------------------------------------------------------------
+ // Statements
+ // --------------------------------------------------------------------------
+
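+  // Statements are handled in forward dataflow style: each visitor takes
+  // the environment before the statement and returns the one after it.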
+ Environment VisitStmt(clang::Stmt* stmt, const Environment& env) {
+#define VISIT(type) do { \
+ clang::type* concrete_stmt = dyn_cast_or_null<clang::type>(stmt); \
+ if (concrete_stmt != NULL) { \
+ return Visit##type (concrete_stmt, env); \
+ } \
+ } while(0);
+
+ if (clang::Expr* expr = dyn_cast_or_null<clang::Expr>(stmt)) {
+ return env.ApplyEffect(VisitExpr(expr, env));
+ }
+
+ VISIT(AsmStmt);
+ VISIT(BreakStmt);
+ VISIT(CompoundStmt);
+ VISIT(ContinueStmt);
+ VISIT(CXXCatchStmt);
+ VISIT(CXXTryStmt);
+ VISIT(DeclStmt);
+ VISIT(DoStmt);
+ VISIT(ForStmt);
+ VISIT(GotoStmt);
+ VISIT(IfStmt);
+ VISIT(IndirectGotoStmt);
+ VISIT(LabelStmt);
+ VISIT(NullStmt);
+ VISIT(ReturnStmt);
+ VISIT(CaseStmt);
+ VISIT(DefaultStmt);
+ VISIT(SwitchStmt);
+ VISIT(WhileStmt);
+#undef VISIT
+
+ return env;
+ }
+
+#define DECL_VISIT_STMT(type) \
+ Environment Visit##type (clang::type* stmt, const Environment& env)
+
+#define IGNORE_STMT(type) \
+ Environment Visit##type (clang::type* stmt, const Environment& env) { \
+ return env; \
+ }
+
+ IGNORE_STMT(IndirectGotoStmt);
+ IGNORE_STMT(NullStmt);
+ IGNORE_STMT(AsmStmt);
+
+ // We are ignoring control flow for simplicity.
+ IGNORE_STMT(GotoStmt);
+ IGNORE_STMT(LabelStmt);
+
+ // We are ignoring try/catch because V8 does not use them.
+ IGNORE_STMT(CXXCatchStmt);
+ IGNORE_STMT(CXXTryStmt);
+
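+  // A Block tracks the environments flowing into and out of a loop or
+  // switch body. Loops are re-analyzed until the merged-in environment
+  // stops changing, i.e. until a fixpoint is reached.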
+ class Block {
+ public:
+ Block(const Environment& in,
+ FunctionAnalyzer* owner)
+ : in_(in),
+ out_(Environment::Unreachable()),
+ changed_(false),
+ owner_(owner) {
+ parent_ = owner_->EnterBlock(this);
+ }
+
+ ~Block() {
+ owner_->LeaveBlock(parent_);
+ }
+
+ void MergeIn(const Environment& env) {
+ Environment old_in = in_;
+ in_ = Environment::Merge(in_, env);
+ changed_ = !old_in.Equal(in_);
+ }
+
+ bool changed() {
+ if (changed_) {
+ changed_ = false;
+ return true;
+ }
+ return false;
+ }
+
+ const Environment& in() {
+ return in_;
+ }
+
+ const Environment& out() {
+ return out_;
+ }
+
+ void MergeOut(const Environment& env) {
+ out_ = Environment::Merge(out_, env);
+ }
+
+ void Seq(clang::Stmt* a, clang::Stmt* b, clang::Stmt* c) {
+ Environment a_out = owner_->VisitStmt(a, in());
+ Environment b_out = owner_->VisitStmt(b, a_out);
+ Environment c_out = owner_->VisitStmt(c, b_out);
+ MergeOut(c_out);
+ }
+
+ void Seq(clang::Stmt* a, clang::Stmt* b) {
+ Environment a_out = owner_->VisitStmt(a, in());
+ Environment b_out = owner_->VisitStmt(b, a_out);
+ MergeOut(b_out);
+ }
+
+ void Loop(clang::Stmt* a, clang::Stmt* b, clang::Stmt* c) {
+ Seq(a, b, c);
+ MergeIn(out());
+ }
+
+ void Loop(clang::Stmt* a, clang::Stmt* b) {
+ Seq(a, b);
+ MergeIn(out());
+ }
+
+
+ private:
+ Environment in_;
+ Environment out_;
+ bool changed_;
+ FunctionAnalyzer* owner_;
+ Block* parent_;
+ };
+
+
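+  // break folds the current environment into the enclosing block's
+  // out-state, continue into its in-state; neither falls through.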
+ DECL_VISIT_STMT(BreakStmt) {
+ block_->MergeOut(env);
+ return Environment::Unreachable();
+ }
+
+ DECL_VISIT_STMT(ContinueStmt) {
+ block_->MergeIn(env);
+ return Environment::Unreachable();
+ }
+
+ DECL_VISIT_STMT(CompoundStmt) {
+ Environment out = env;
+ clang::CompoundStmt::body_iterator end = stmt->body_end();
+ for (clang::CompoundStmt::body_iterator s = stmt->body_begin();
+ s != end;
+ ++s) {
+ out = VisitStmt(*s, out);
+ }
+ return out;
+ }
+
+ DECL_VISIT_STMT(WhileStmt) {
+ Block block (env, this);
+ do {
+ block.Loop(stmt->getCond(), stmt->getBody());
+ } while (block.changed());
+ return block.out();
+ }
+
+ DECL_VISIT_STMT(DoStmt) {
+ Block block (env, this);
+ do {
+ block.Loop(stmt->getBody(), stmt->getCond());
+ } while (block.changed());
+ return block.out();
+ }
+
+ DECL_VISIT_STMT(ForStmt) {
+ Block block (VisitStmt(stmt->getInit(), env), this);
+ do {
+ block.Loop(stmt->getCond(),
+ stmt->getBody(),
+ stmt->getInc());
+ } while (block.changed());
+ return block.out();
+ }
+
+ DECL_VISIT_STMT(IfStmt) {
+ Environment cond_out = VisitStmt(stmt->getCond(), env);
+ Environment then_out = VisitStmt(stmt->getThen(), cond_out);
+ Environment else_out = VisitStmt(stmt->getElse(), cond_out);
+ return Environment::Merge(then_out, else_out);
+ }
+
+ DECL_VISIT_STMT(SwitchStmt) {
+ Block block (env, this);
+ block.Seq(stmt->getCond(), stmt->getBody());
+ return block.out();
+ }
+
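+  // A case label is reachable both by falling through from the previous
+  // case and directly from the switch head, so merge both environments.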
+ DECL_VISIT_STMT(CaseStmt) {
+ Environment in = Environment::Merge(env, block_->in());
+ Environment after_lhs = VisitStmt(stmt->getLHS(), in);
+ return VisitStmt(stmt->getSubStmt(), after_lhs);
+ }
+
+ DECL_VISIT_STMT(DefaultStmt) {
+ Environment in = Environment::Merge(env, block_->in());
+ return VisitStmt(stmt->getSubStmt(), in);
+ }
+
+ DECL_VISIT_STMT(ReturnStmt) {
+ VisitExpr(stmt->getRetValue(), env);
+ return Environment::Unreachable();
}
const clang::TagType* ToTagType(const clang::Type* t) {
@@ -311,11 +1061,14 @@
}
}
- bool IsRawPointerType(clang::Expr* expr) {
- clang::QualType result = expr->getType();
+ bool IsDerivedFrom(clang::CXXRecordDecl* record,
+ clang::CXXRecordDecl* base) {
+ return (record == base) || record->isDerivedFrom(base);
+ }
+ bool IsRawPointerType(clang::QualType qtype) {
const clang::PointerType* type =
- dyn_cast_or_null<clang::PointerType>(expr->getType().getTypePtr());
+ dyn_cast_or_null<clang::PointerType>(qtype.getTypePtrOrNull());
if (type == NULL) return false;
const clang::TagType* pointee =
@@ -326,146 +1079,154 @@
dyn_cast_or_null<clang::CXXRecordDecl>(pointee->getDecl());
if (record == NULL) return false;
- return InV8Namespace(record) &&
- record->hasDefinition() &&
- ((record == objectDecl_) || record->isDerivedFrom(objectDecl_));
+ if (!InV8Namespace(record)) return false;
+
+ if (!record->hasDefinition()) return false;
+
+ record = record->getDefinition();
+
+ return IsDerivedFrom(record, object_decl_) &&
+ !IsDerivedFrom(record, smi_decl_);
}
- bool IsHandleDereference(clang::Expr* expr) {
- if (expr == NULL) {
- return false;
- } else if (isa<clang::UnaryOperator>(expr)) {
- clang::UnaryOperator* unop = cast<clang::UnaryOperator>(expr);
- return unop->getOpcode() == clang::UO_Deref &&
- IsHandleType(handleDeclName_, unop->getSubExpr()->getType());
- } else if (isa<clang::CXXOperatorCallExpr>(expr)) {
- clang::CXXOperatorCallExpr* op = cast<clang::CXXOperatorCallExpr>(expr);
- return (op->getOperator() == clang::OO_Star ||
- op->getOperator() == clang::OO_Arrow) &&
- IsHandleType(handleDeclName_, op->getArg(0)->getType());
- } else {
- return false;
- }
- }
+ Environment VisitDecl(clang::Decl* decl, const Environment& env) {
+ if (clang::VarDecl* var = dyn_cast<clang::VarDecl>(decl)) {
+ Environment out = var->hasInit() ? VisitStmt(var->getInit(), env) : env;
- bool CanCauseGC(clang::Expr* expr) {
- if (expr == NULL) return false;
-
- has_gc_ = false;
- has_derefs_ = true;
- TraverseStmt(expr);
- return has_gc_;
- }
-
- bool ManipulatesRawPointers(clang::Expr* expr) {
- if (expr == NULL) return false;
-
- has_gc_ = true;
- has_derefs_ = false;
- TraverseStmt(expr);
- return has_derefs_;
- }
-
- bool CanCauseGC(const clang::CallExpr* call) {
- const clang::FunctionDecl* fn = call->getDirectCallee();
- return (fn != NULL) && KnownToCauseGC(ctx_, fn);
- }
-
- // For generic expression classification.
- bool has_derefs_;
- bool has_gc_;
-
- // For callsite classification.
- static const int kMaxNumberOfArguments = 64;
- std::bitset<kMaxNumberOfArguments> derefs_;
- std::bitset<kMaxNumberOfArguments> gc_;
-
- clang::DeclarationName handleDeclName_;
- clang::MangleContext* ctx_;
- clang::CXXRecordDecl* objectDecl_;
-};
-
-const std::string BAD_EXPRESSION_MSG("Possible problem with evaluation order.");
-
-class ExpressionsFinder : public clang::ASTConsumer,
- public clang::RecursiveASTVisitor<ExpressionsFinder> {
- public:
- explicit ExpressionsFinder(clang::Diagnostic& d, clang::SourceManager& sm)
- : d_(d), sm_(sm) { }
-
- struct Resolver {
- explicit Resolver(clang::ASTContext& ctx)
- : ctx_(ctx), decl_ctx_(ctx.getTranslationUnitDecl()) {
- }
-
- Resolver(clang::ASTContext& ctx, clang::DeclContext* decl_ctx)
- : ctx_(ctx), decl_ctx_(decl_ctx) {
- }
-
- clang::DeclarationName ResolveName(const char* n) {
- clang::IdentifierInfo* ident = &ctx_.Idents.get(n);
- return ctx_.DeclarationNames.getIdentifier(ident);
- }
-
- Resolver ResolveNamespace(const char* n) {
- return Resolver(ctx_, Resolve<clang::NamespaceDecl>(n));
- }
-
- template<typename T>
- T* Resolve(const char* n) {
- if (decl_ctx_ == NULL) return NULL;
-
- clang::DeclContext::lookup_result result =
- decl_ctx_->lookup(ResolveName(n));
-
- for (clang::DeclContext::lookup_iterator i = result.first,
- e = result.second;
- i != e;
- i++) {
- if (isa<T>(*i)) return cast<T>(*i);
+ if (IsRawPointerType(var->getType())) {
+ out = out.Define(var->getNameAsString());
}
- return NULL;
+ return out;
}
+ // TODO: handle other declarations?
+ return env;
+ }
- private:
- clang::ASTContext& ctx_;
- clang::DeclContext* decl_ctx_;
- };
+ DECL_VISIT_STMT(DeclStmt) {
+ Environment out = env;
+ clang::DeclStmt::decl_iterator end = stmt->decl_end();
+ for (clang::DeclStmt::decl_iterator decl = stmt->decl_begin();
+ decl != end;
+ ++decl) {
+ out = VisitDecl(*decl, out);
+ }
+ return out;
+ }
+
+
+ void DefineParameters(const clang::FunctionDecl* f,
+ Environment* env) {
+ env->MDefine(THIS);
+ clang::FunctionDecl::param_const_iterator end = f->param_end();
+ for (clang::FunctionDecl::param_const_iterator p = f->param_begin();
+ p != end;
+ ++p) {
+ env->MDefine((*p)->getNameAsString());
+ }
+ }
+
+
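+  // Entry point: analyze a function body starting from an environment
+  // in which 'this' and all parameters are defined.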
+ void AnalyzeFunction(const clang::FunctionDecl* f) {
+ const clang::FunctionDecl* body = NULL;
+ if (f->hasBody(body)) {
+ Environment env;
+ DefineParameters(body, &env);
+ VisitStmt(body->getBody(), env);
+ Environment::ClearSymbolTable();
+ }
+ }
+
+ Block* EnterBlock(Block* block) {
+ Block* parent = block_;
+ block_ = block;
+ return parent;
+ }
+
+ void LeaveBlock(Block* block) {
+ block_ = block;
+ }
+
+ private:
+ void ReportUnsafe(const clang::Expr* expr, const std::string& msg) {
+ d_.Report(clang::FullSourceLoc(expr->getExprLoc(), sm_),
+ d_.getCustomDiagID(clang::Diagnostic::Warning, msg));
+ }
+
+
+ clang::MangleContext* ctx_;
+ clang::DeclarationName handle_decl_name_;
+ clang::CXXRecordDecl* object_decl_;
+ clang::CXXRecordDecl* smi_decl_;
+
+ clang::Diagnostic& d_;
+ clang::SourceManager& sm_;
+
+ Block* block_;
+ bool dead_vars_analysis_;
+};
+
+
+class ProblemsFinder : public clang::ASTConsumer,
+ public clang::RecursiveASTVisitor<ProblemsFinder> {
+ public:
+ ProblemsFinder(clang::Diagnostic& d,
+ clang::SourceManager& sm,
+ const std::vector<std::string>& args)
+ : d_(d), sm_(sm), dead_vars_analysis_(false) {
+ for (unsigned i = 0; i < args.size(); ++i) {
+ if (args[i] == "--dead-vars") {
+ dead_vars_analysis_ = true;
+ }
+ }
+ }
virtual void HandleTranslationUnit(clang::ASTContext &ctx) {
Resolver r(ctx);
- clang::CXXRecordDecl* objectDecl =
+ clang::CXXRecordDecl* object_decl =
r.ResolveNamespace("v8").ResolveNamespace("internal").
Resolve<clang::CXXRecordDecl>("Object");
- if (objectDecl != NULL) {
- expression_classifier_ =
- new ExpressionClassifier(r.ResolveName("Handle"),
- clang::createItaniumMangleContext(ctx, d_),
- objectDecl);
+ clang::CXXRecordDecl* smi_decl =
+ r.ResolveNamespace("v8").ResolveNamespace("internal").
+ Resolve<clang::CXXRecordDecl>("Smi");
+
+ if (object_decl != NULL) object_decl = object_decl->getDefinition();
+
+ if (smi_decl != NULL) smi_decl = smi_decl->getDefinition();
+
+ if (object_decl != NULL && smi_decl != NULL) {
+ function_analyzer_ =
+ new FunctionAnalyzer(clang::createItaniumMangleContext(ctx, d_),
+ r.ResolveName("Handle"),
+ object_decl,
+ smi_decl,
+ d_,
+ sm_,
+ dead_vars_analysis_);
TraverseDecl(ctx.getTranslationUnitDecl());
} else {
- std::cerr << "Failed to resolve v8::internal::Object" << std::endl;
+ if (object_decl == NULL) {
+ llvm::errs() << "Failed to resolve v8::internal::Object\n";
+ }
+ if (smi_decl == NULL) {
+ llvm::errs() << "Failed to resolve v8::internal::Smi\n";
+ }
}
}
- virtual bool VisitExpr(clang::Expr* expr) {
- if ( expression_classifier_->IsBadCallSite(expr) ) {
- d_.Report(clang::FullSourceLoc(expr->getExprLoc(), sm_),
- d_.getCustomDiagID(clang::Diagnostic::Warning,
- BAD_EXPRESSION_MSG));
- }
-
+ virtual bool VisitFunctionDecl(clang::FunctionDecl* decl) {
+ function_analyzer_->AnalyzeFunction(decl);
return true;
}
private:
clang::Diagnostic& d_;
clang::SourceManager& sm_;
+ bool dead_vars_analysis_;
- ExpressionClassifier* expression_classifier_;
+ FunctionAnalyzer* function_analyzer_;
};
@@ -474,22 +1235,27 @@
protected:
clang::ASTConsumer *CreateASTConsumer(clang::CompilerInstance &CI,
llvm::StringRef InFile) {
- return new ConsumerType(CI.getDiagnostics(), CI.getSourceManager());
+ return new ConsumerType(CI.getDiagnostics(), CI.getSourceManager(), args_);
}
bool ParseArgs(const clang::CompilerInstance &CI,
const std::vector<std::string>& args) {
+ args_ = args;
return true;
}
- void PrintHelp(llvm::raw_ostream& ros) { }
+ void PrintHelp(llvm::raw_ostream& ros) {
+ }
+ private:
+ std::vector<std::string> args_;
};
}
-static clang::FrontendPluginRegistry::Add<Action<ExpressionsFinder> >
-FindProblems("find-problems", "Find possible problems with evaluations order.");
+static clang::FrontendPluginRegistry::Add<Action<ProblemsFinder> >
+FindProblems("find-problems", "Find GC-unsafe places.");
-static clang::FrontendPluginRegistry::Add<Action<FunctionDeclarationFinder> >
+static clang::FrontendPluginRegistry::Add<
+ Action<FunctionDeclarationFinder> >
DumpCallees("dump-callees", "Dump callees for each function.");
diff --git a/tools/gcmole/gcmole.lua b/tools/gcmole/gcmole.lua
index 7fb8de0..4afc66d 100644
--- a/tools/gcmole/gcmole.lua
+++ b/tools/gcmole/gcmole.lua
@@ -29,8 +29,44 @@
-- Usage: CLANG_BIN=clang-bin-dir lua tools/gcmole/gcmole.lua [arm|ia32|x64]
local DIR = arg[0]:match("^(.+)/[^/]+$")
-
-local ARCHS = arg[1] and { arg[1] } or { 'ia32', 'arm', 'x64' }
+
+local FLAGS = {
+  -- Do not rebuild the gcsuspects file; reuse the previously generated one.
+ reuse_gcsuspects = false;
+
+ -- Print commands to console before executing them.
+ verbose = false;
+
+ -- Perform dead variable analysis (generates many false positives).
+  -- TODO add some sort of whitelist to filter out false positives.
+ dead_vars = false;
+
+  -- When building gcsuspects, whitelist certain functions that would
+  -- otherwise be treated as capable of causing GC. Currently used to
+  -- reduce the number of false positives in the dead variable analysis.
+  -- See the TODO for WHITELIST below.
+ whitelist = true;
+}
+local ARGS = {}
+
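+-- Arguments of the form --flag or --noflag toggle entries in FLAGS;
+-- anything else is kept as a positional argument (target architecture).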
+for i = 1, #arg do
+ local flag = arg[i]:match "^%-%-([%w_-]+)$"
+ if flag then
+ local no, real_flag = flag:match "^(no)([%w_-]+)$"
+ if real_flag then flag = real_flag end
+
+ flag = flag:gsub("%-", "_")
+ if FLAGS[flag] ~= nil then
+ FLAGS[flag] = (no ~= "no")
+ else
+ error("Unknown flag: " .. flag)
+ end
+ else
+ table.insert(ARGS, arg[i])
+ end
+end
+
+local ARCHS = ARGS[1] and { ARGS[1] } or { 'ia32', 'arm', 'x64' }
local io = require "io"
local os = require "os"
@@ -43,33 +79,40 @@
-------------------------------------------------------------------------------
-- Clang invocation
-local CLANG_BIN = os.getenv "CLANG_BIN"
+local CLANG_BIN = os.getenv "CLANG_BIN"
if not CLANG_BIN or CLANG_BIN == "" then
error "CLANG_BIN not set"
-end
+end
-local function MakeClangCommandLine(plugin, triple, arch_define)
- return CLANG_BIN .. "/clang -cc1 -load " .. DIR .. "/libgcmole.so"
+local function MakeClangCommandLine(plugin, plugin_args, triple, arch_define)
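+   -- clang -cc1 expects plugin arguments as "-plugin-arg-<plugin> <value>".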
+ if plugin_args then
+ for i = 1, #plugin_args do
+ plugin_args[i] = "-plugin-arg-" .. plugin .. " " .. plugin_args[i]
+ end
+ plugin_args = " " .. table.concat(plugin_args, " ")
+ end
+ return CLANG_BIN .. "/clang -cc1 -load " .. DIR .. "/libgcmole.so"
.. " -plugin " .. plugin
- .. " -triple " .. triple
+ .. (plugin_args or "")
+ .. " -triple " .. triple
.. " -D" .. arch_define
- .. " -DENABLE_VMSTATE_TRACKING"
- .. " -DENABLE_LOGGING_AND_PROFILING"
+ .. " -DENABLE_VMSTATE_TRACKING"
+ .. " -DENABLE_LOGGING_AND_PROFILING"
.. " -DENABLE_DEBUGGER_SUPPORT"
.. " -Isrc"
end
function InvokeClangPluginForEachFile(filenames, cfg, func)
local cmd_line = MakeClangCommandLine(cfg.plugin,
- cfg.triple,
- cfg.arch_define)
+ cfg.plugin_args,
+ cfg.triple,
+ cfg.arch_define)
- for _, filename in ipairs(filenames) do
+ for _, filename in ipairs(filenames) do
log("-- %s", filename)
-
local action = cmd_line .. " src/" .. filename .. " 2>&1"
-
+ if FLAGS.verbose then print('popen ', action) end
local pipe = io.popen(action)
func(filename, pipe:lines())
pipe:close()
@@ -84,7 +127,7 @@
local sconscript = f:read('*a')
f:close()
- local SOURCES = sconscript:match "SOURCES = {(.-)}";
+ local SOURCES = sconscript:match "SOURCES = {(.-)}";
local sources = {}
@@ -93,13 +136,13 @@
local files = {}
for file in list:gmatch "[^%s]+" do table.insert(files, file) end
sources[condition] = files
- end
+ end
for condition, list in SOURCES:gmatch "'([^']-)': %[(.-)%]" do
local files = {}
for file in list:gmatch "'([^']-)'" do table.insert(files, file) end
sources[condition] = files
- end
+ end
return sources
end
@@ -119,7 +162,7 @@
local list = {}
for condition, files in pairs(sources) do
if EvaluateCondition(condition, props) then
- for i = 1, #files do table.insert(list, files[i]) end
+ for i = 1, #files do table.insert(list, files[i]) end
end
end
return list
@@ -129,9 +172,9 @@
local function FilesForArch(arch)
return BuildFileList(sources, { os = 'linux',
- arch = arch,
- mode = 'debug',
- simulator = ''})
+ arch = arch,
+ mode = 'debug',
+ simulator = ''})
end
local mtConfig = {}
@@ -149,29 +192,67 @@
local ARCHITECTURES = {
ia32 = config { triple = "i586-unknown-linux",
- arch_define = "V8_TARGET_ARCH_IA32" },
+ arch_define = "V8_TARGET_ARCH_IA32" },
arm = config { triple = "i586-unknown-linux",
- arch_define = "V8_TARGET_ARCH_ARM" },
+ arch_define = "V8_TARGET_ARCH_ARM" },
x64 = config { triple = "x86_64-unknown-linux",
- arch_define = "V8_TARGET_ARCH_X64" }
+ arch_define = "V8_TARGET_ARCH_X64" }
}
-------------------------------------------------------------------------------
--- GCSuspects Generation
+-- GCSuspects Generation
-local gc = {}
-local funcs = {}
+local gc, gc_caused, funcs
+
+local WHITELIST = {
+ -- The following functions call CEntryStub which is always present.
+ "MacroAssembler.*CallExternalReference",
+ "MacroAssembler.*CallRuntime",
+ "CompileCallLoadPropertyWithInterceptor",
+ "CallIC.*GenerateMiss",
+
+ -- DirectCEntryStub is a special stub used on ARM.
+ -- It is pinned and always present.
+ "DirectCEntryStub.*GenerateCall",
+
+  -- TODO GCMole currently is not sensitive enough to understand that
+  --      certain functions only cause GC and return Failure simultaneously.
+  --      Callsites of such functions are safe as long as they properly
+  --      check the return value and propagate the Failure to the caller.
+  --      It should be possible to extend GCMole to understand this.
+ "Heap.*AllocateFunctionPrototype"
+};
+
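+-- Remember why a function is considered a GC suspect; the collected
+-- causes are written to the gccauses file.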
+local function AddCause(name, cause)
+ local t = gc_caused[name]
+ if not t then
+ t = {}
+ gc_caused[name] = t
+ end
+ table.insert(t, cause)
+end
local function resolve(name)
local f = funcs[name]
-
- if not f then
+
+ if not f then
f = {}
funcs[name] = f
-
- if name:match "Collect.*Garbage" then gc[name] = true end
+
+ if name:match "Collect.*Garbage" then
+ gc[name] = true
+ AddCause(name, "<GC>")
+ end
+
+ if FLAGS.whitelist then
+ for i = 1, #WHITELIST do
+ if name:match(WHITELIST[i]) then
+ gc[name] = false
+ end
+ end
+ end
end
-
+
return f
end
@@ -180,11 +261,11 @@
for funcname in lines do
if funcname:sub(1, 1) ~= '\t' then
- resolve(funcname)
- scope = funcname
+ resolve(funcname)
+ scope = funcname
else
- local name = funcname:sub(2)
- resolve(name)[scope] = true
+ local name = funcname:sub(2)
+ resolve(name)[scope] = true
end
end
end
@@ -192,60 +273,82 @@
local function propagate ()
log "** Propagating GC information"
- local function mark(callers)
- for caller, _ in pairs(callers) do
- if not gc[caller] then
- gc[caller] = true
- mark(funcs[caller])
- end
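+   -- Transitively mark every caller of a GC suspect as a suspect itself,
+   -- recording |from| as the cause.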
+ local function mark(from, callers)
+ for caller, _ in pairs(callers) do
+ if gc[caller] == nil then
+ gc[caller] = true
+ mark(caller, funcs[caller])
+ end
+ AddCause(caller, from)
end
end
for funcname, callers in pairs(funcs) do
- if gc[funcname] then mark(callers) end
+ if gc[funcname] then mark(funcname, callers) end
end
end
local function GenerateGCSuspects(arch, files, cfg)
+ -- Reset the global state.
+ gc, gc_caused, funcs = {}, {}, {}
+
log ("** Building GC Suspects for %s", arch)
InvokeClangPluginForEachFile (files,
cfg:extend { plugin = "dump-callees" },
parse)
-
+
propagate()
local out = assert(io.open("gcsuspects", "w"))
- for name, _ in pairs(gc) do out:write (name, '\n') end
+ for name, value in pairs(gc) do if value then out:write (name, '\n') end end
out:close()
+
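+  -- Also dump, as a Lua table, which callees caused each function to be
+  -- marked as a suspect.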
+ local out = assert(io.open("gccauses", "w"))
+ out:write "GC = {"
+ for name, causes in pairs(gc_caused) do
+ out:write("['", name, "'] = {")
+ for i = 1, #causes do out:write ("'", causes[i], "';") end
+ out:write("};\n")
+ end
+ out:write "}"
+ out:close()
+
log ("** GCSuspects generated for %s", arch)
end
--------------------------------------------------------------------------------
+--------------------------------------------------------------------------------
-- Analysis
-local function CheckCorrectnessForArch(arch)
+local function CheckCorrectnessForArch(arch)
local files = FilesForArch(arch)
local cfg = ARCHITECTURES[arch]
- GenerateGCSuspects(arch, files, cfg)
+ if not FLAGS.reuse_gcsuspects then
+ GenerateGCSuspects(arch, files, cfg)
+ end
local processed_files = 0
local errors_found = false
local function SearchForErrors(filename, lines)
processed_files = processed_files + 1
for l in lines do
- errors_found = errors_found or
- l:match "^[^:]+:%d+:%d+:" or
- l:match "error" or
- l:match "warning"
+ errors_found = errors_found or
+ l:match "^[^:]+:%d+:%d+:" or
+ l:match "error" or
+ l:match "warning"
print(l)
end
end
- log("** Searching for evaluation order problems for %s", arch)
+ log("** Searching for evaluation order problems%s for %s",
+ FLAGS.dead_vars and " and dead variables" or "",
+ arch)
+ local plugin_args
+ if FLAGS.dead_vars then plugin_args = { "--dead-vars" } end
InvokeClangPluginForEachFile(files,
- cfg:extend { plugin = "find-problems" },
- SearchForErrors)
+ cfg:extend { plugin = "find-problems",
+ plugin_args = plugin_args },
+ SearchForErrors)
log("** Done processing %d files. %s",
processed_files,
errors_found and "Errors found" or "No errors found")
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 7fea189..2a8aa42 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -453,8 +453,6 @@
'../../src/fixed-dtoa.h',
'../../src/flags.cc',
'../../src/flags.h',
- '../../src/frame-element.cc',
- '../../src/frame-element.h',
'../../src/frames-inl.h',
'../../src/frames.cc',
'../../src/frames.h',
@@ -583,8 +581,6 @@
'../../src/stub-cache.h',
'../../src/token.cc',
'../../src/token.h',
- '../../src/top.cc',
- '../../src/top.h',
'../../src/type-info.cc',
'../../src/type-info.h',
'../../src/unbound-queue-inl.h',
diff --git a/tools/v8.xcodeproj/project.pbxproj b/tools/v8.xcodeproj/project.pbxproj
index 96103ba..adb6409 100644
--- a/tools/v8.xcodeproj/project.pbxproj
+++ b/tools/v8.xcodeproj/project.pbxproj
@@ -168,7 +168,6 @@
8956928012D4ED240072C313 /* string-stream.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1880E719B8F00D62E90 /* string-stream.cc */; };
8956928212D4ED240072C313 /* stub-cache.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF18C0E719B8F00D62E90 /* stub-cache.cc */; };
8956928312D4ED240072C313 /* token.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF18E0E719B8F00D62E90 /* token.cc */; };
- 8956928412D4ED240072C313 /* top.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1900E719B8F00D62E90 /* top.cc */; };
8956928512D4ED240072C313 /* type-info.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BAE1175B2D200C4CD55 /* type-info.cc */; };
8956928612D4ED240072C313 /* unicode.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1930E719B8F00D62E90 /* unicode.cc */; };
8956928712D4ED240072C313 /* utils.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1970E719B8F00D62E90 /* utils.cc */; };
@@ -274,7 +273,6 @@
89A88E230E71A6BE0043BA31 /* stub-cache-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF18B0E719B8F00D62E90 /* stub-cache-ia32.cc */; };
89A88E240E71A6BF0043BA31 /* stub-cache.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF18C0E719B8F00D62E90 /* stub-cache.cc */; };
89A88E250E71A6C20043BA31 /* token.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF18E0E719B8F00D62E90 /* token.cc */; };
- 89A88E260E71A6C90043BA31 /* top.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1900E719B8F00D62E90 /* top.cc */; };
89A88E270E71A6CB0043BA31 /* unicode.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1930E719B8F00D62E90 /* unicode.cc */; };
89A88E290E71A6CE0043BA31 /* utils.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1970E719B8F00D62E90 /* utils.cc */; };
89A88E2A0E71A6D00043BA31 /* v8-counters.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1990E719B8F00D62E90 /* v8-counters.cc */; };
@@ -362,7 +360,6 @@
89F23C760E78D5B2006B2466 /* string-stream.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1880E719B8F00D62E90 /* string-stream.cc */; };
89F23C780E78D5B2006B2466 /* stub-cache.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF18C0E719B8F00D62E90 /* stub-cache.cc */; };
89F23C790E78D5B2006B2466 /* token.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF18E0E719B8F00D62E90 /* token.cc */; };
- 89F23C7A0E78D5B2006B2466 /* top.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1900E719B8F00D62E90 /* top.cc */; };
89F23C7B0E78D5B2006B2466 /* unicode.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1930E719B8F00D62E90 /* unicode.cc */; };
89F23C7D0E78D5B2006B2466 /* utils.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1970E719B8F00D62E90 /* utils.cc */; };
89F23C7E0E78D5B2006B2466 /* v8-counters.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1990E719B8F00D62E90 /* v8-counters.cc */; };
@@ -768,7 +765,6 @@
897FF18D0E719B8F00D62E90 /* stub-cache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "stub-cache.h"; sourceTree = "<group>"; };
897FF18E0E719B8F00D62E90 /* token.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = token.cc; sourceTree = "<group>"; };
897FF18F0E719B8F00D62E90 /* token.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = token.h; sourceTree = "<group>"; };
- 897FF1900E719B8F00D62E90 /* top.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = top.cc; sourceTree = "<group>"; };
897FF1920E719B8F00D62E90 /* unicode-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "unicode-inl.h"; sourceTree = "<group>"; };
897FF1930E719B8F00D62E90 /* unicode.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = unicode.cc; sourceTree = "<group>"; };
897FF1940E719B8F00D62E90 /* unicode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = unicode.h; sourceTree = "<group>"; };
@@ -1237,7 +1233,6 @@
897FF18D0E719B8F00D62E90 /* stub-cache.h */,
897FF18E0E719B8F00D62E90 /* token.cc */,
897FF18F0E719B8F00D62E90 /* token.h */,
- 897FF1900E719B8F00D62E90 /* top.cc */,
9FA38BAE1175B2D200C4CD55 /* type-info.cc */,
9FA38BAF1175B2D200C4CD55 /* type-info.h */,
9FF7A28211A642EA0051B8F2 /* unbound-queue-inl.h */,
@@ -1846,7 +1841,6 @@
8956928012D4ED240072C313 /* string-stream.cc in Sources */,
8956928212D4ED240072C313 /* stub-cache.cc in Sources */,
8956928312D4ED240072C313 /* token.cc in Sources */,
- 8956928412D4ED240072C313 /* top.cc in Sources */,
8956928512D4ED240072C313 /* type-info.cc in Sources */,
8956928612D4ED240072C313 /* unicode.cc in Sources */,
8956928712D4ED240072C313 /* utils.cc in Sources */,
@@ -1988,7 +1982,6 @@
89A88E230E71A6BE0043BA31 /* stub-cache-ia32.cc in Sources */,
89A88E240E71A6BF0043BA31 /* stub-cache.cc in Sources */,
89A88E250E71A6C20043BA31 /* token.cc in Sources */,
- 89A88E260E71A6C90043BA31 /* top.cc in Sources */,
9FA38BC01175B2D200C4CD55 /* type-info.cc in Sources */,
89A88E270E71A6CB0043BA31 /* unicode.cc in Sources */,
89A88E290E71A6CE0043BA31 /* utils.cc in Sources */,
@@ -2157,7 +2150,6 @@
89F23CA00E78D609006B2466 /* stub-cache-arm.cc in Sources */,
89F23C780E78D5B2006B2466 /* stub-cache.cc in Sources */,
89F23C790E78D5B2006B2466 /* token.cc in Sources */,
- 89F23C7A0E78D5B2006B2466 /* top.cc in Sources */,
9FA38BB91175B2D200C4CD55 /* type-info.cc in Sources */,
89F23C7B0E78D5B2006B2466 /* unicode.cc in Sources */,
89F23C7D0E78D5B2006B2466 /* utils.cc in Sources */,