Version 3.19.0
Deprecated Context::New which returns Persistent.
Added Persistent<T>::Reset which disposes the handle and redirects it to point to another object.
Deprecated WriteAscii and MayContainNonAscii.
Exposed AssertNoAllocation to API.
Performance and stability improvements on all platforms.
git-svn-id: http://v8.googlecode.com/svn/trunk@14603 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index ac1be21..d1dbb29 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,17 @@
+2013-05-10: Version 3.19.0
+
+ Deprecated Context::New which returns Persistent.
+
+ Added Persistent<T>::Reset which disposes the handle and redirects it to
+ point to another object.
+
+ Deprecated WriteAscii and MayContainNonAscii.
+
+ Exposed AssertNoAllocation to API.
+
+ Performance and stability improvements on all platforms.
+
+
2013-04-30: Version 3.18.5
Allowed setting debugger breakpoints on CompareNilICs (issue 2660)
diff --git a/include/v8.h b/include/v8.h
index ee49ca4..3a86e86 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -123,6 +123,7 @@
class Int32;
class Integer;
class Isolate;
+class LocalContext;
class Number;
class NumberObject;
class Object;
@@ -190,8 +191,13 @@
* \param object the weak global object to be reclaimed by the garbage collector
* \param parameter the value passed in when making the weak global object
*/
-typedef void (*WeakReferenceCallback)(Persistent<Value> object,
- void* parameter);
+template<typename T, typename P>
+class WeakReferenceCallbacks {
+ public:
+ typedef void (*Revivable)(Isolate* isolate,
+ Persistent<T>* object,
+ P* parameter);
+};
// TODO(svenpanne) Temporary definition until Chrome is in sync.
typedef void (*NearDeathCallback)(Isolate* isolate,
@@ -205,6 +211,9 @@
*(static_cast<T* volatile*>(0)) = static_cast<S*>(0); \
}
+
+#define V8_USE_UNSAFE_HANDLES
+
/**
* An object reference managed by the v8 garbage collector.
*
@@ -237,10 +246,12 @@
*/
V8_INLINE(Handle()) : val_(0) {}
+#ifdef V8_USE_UNSAFE_HANDLES
/**
* Creates a new handle for the specified value.
*/
V8_INLINE(explicit Handle(T* val)) : val_(val) {}
+#endif
/**
* Creates a handle for the contents of the specified handle. This
@@ -282,7 +293,7 @@
* to which they refer are identical.
* The handles' references are not checked.
*/
- template <class S> V8_INLINE(bool operator==(Handle<S> that) const) {
+ template <class S> V8_INLINE(bool operator==(const Handle<S> that) const) {
internal::Object** a = reinterpret_cast<internal::Object**>(**this);
internal::Object** b = reinterpret_cast<internal::Object**>(*that);
if (a == 0) return b == 0;
@@ -290,6 +301,17 @@
return *a == *b;
}
+#ifndef V8_USE_UNSAFE_HANDLES
+ template <class S> V8_INLINE(
+ bool operator==(const Persistent<S>& that) const) {
+ internal::Object** a = reinterpret_cast<internal::Object**>(**this);
+ internal::Object** b = reinterpret_cast<internal::Object**>(*that);
+ if (a == 0) return b == 0;
+ if (b == 0) return false;
+ return *a == *b;
+ }
+#endif
+
/**
* Checks whether two handles are different.
* Returns true if only one of the handles is empty, or if
@@ -313,7 +335,46 @@
return Handle<S>::Cast(*this);
}
+#ifndef V8_USE_UNSAFE_HANDLES
+ V8_INLINE(static Handle<T> New(Isolate* isolate, Handle<T> that)) {
+ return New(isolate, that.val_);
+ }
+ // TODO(dcarney): remove before cutover
+ V8_INLINE(static Handle<T> New(Isolate* isolate, const Persistent<T>& that)) {
+ return New(isolate, that.val_);
+ }
+
+#ifndef V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
+
private:
+#endif
+ /**
+ * Creates a new handle for the specified value.
+ */
+ V8_INLINE(explicit Handle(T* val)) : val_(val) {}
+#endif
+
+ private:
+ template<class F>
+ friend class Persistent;
+ template<class F>
+ friend class Local;
+ friend class Arguments;
+ friend class String;
+ friend class Object;
+ friend class AccessorInfo;
+ friend Handle<Primitive> Undefined(Isolate* isolate);
+ friend Handle<Primitive> Null(Isolate* isolate);
+ friend Handle<Boolean> True(Isolate* isolate);
+ friend Handle<Boolean> False(Isolate* isolate);
+ friend class Context;
+ friend class InternalHandleHelper;
+ friend class LocalContext;
+
+#ifndef V8_USE_UNSAFE_HANDLES
+ V8_INLINE(static Handle<T> New(Isolate* isolate, T* that));
+#endif
+
T* val_;
};
@@ -325,6 +386,7 @@
* handle scope are destroyed when the handle scope is destroyed. Hence it
* is not necessary to explicitly deallocate local handles.
*/
+// TODO(dcarney): deprecate entire class
template <class T> class Local : public Handle<T> {
public:
V8_INLINE(Local());
@@ -337,7 +399,12 @@
*/
TYPE_CHECK(T, S);
}
+
+
+#ifdef V8_USE_UNSAFE_HANDLES
template <class S> V8_INLINE(Local(S* that) : Handle<T>(that)) { }
+#endif
+
template <class S> V8_INLINE(static Local<T> Cast(Local<S> that)) {
#ifdef V8_ENABLE_CHECKS
// If we're going to perform the type check then we have to check
@@ -346,6 +413,12 @@
#endif
return Local<T>(T::Cast(*that));
}
+#ifndef V8_USE_UNSAFE_HANDLES
+ template <class S> V8_INLINE(Local(Handle<S> that))
+ : Handle<T>(reinterpret_cast<T*>(*that)) {
+ TYPE_CHECK(T, S);
+ }
+#endif
template <class S> V8_INLINE(Local<S> As()) {
return Local<S>::Cast(*this);
@@ -358,8 +431,32 @@
*/
V8_INLINE(static Local<T> New(Handle<T> that));
V8_INLINE(static Local<T> New(Isolate* isolate, Handle<T> that));
-};
+#ifndef V8_USE_UNSAFE_HANDLES
+ // TODO(dcarney): remove before cutover
+ V8_INLINE(static Local<T> New(Isolate* isolate, const Persistent<T>& that));
+#ifndef V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
+
+ private:
+#endif
+ template <class S> V8_INLINE(Local(S* that) : Handle<T>(that)) { }
+#endif
+
+ private:
+ template<class F>
+ friend class Persistent;
+ template<class F>
+ friend class Handle;
+ friend class Arguments;
+ friend class String;
+ friend class Object;
+ friend class AccessorInfo;
+ friend class Context;
+ friend class InternalHandleHelper;
+ friend class LocalContext;
+
+ V8_INLINE(static Local<T> New(Isolate* isolate, T* that));
+};
/**
* An object reference that is independent of any handle scope. Where
@@ -378,13 +475,38 @@
* different storage cells but rather two references to the same
* storage cell.
*/
-template <class T> class Persistent : public Handle<T> {
+template <class T> class Persistent // NOLINT
+#ifdef V8_USE_UNSAFE_HANDLES
+ : public Handle<T> {
+#else
+ { // NOLINT
+#endif
public:
+#ifndef V8_USE_UNSAFE_HANDLES
+ V8_INLINE(Persistent()) : val_(0) { }
+ V8_INLINE(~Persistent()) {
+ // TODO(dcarney): add this back before cutover.
+ // Dispose();
+ }
+ V8_INLINE(bool IsEmpty() const) { return val_ == 0; }
+ // TODO(dcarney): remove somehow before cutover
+ // The handle should either be 0, or a pointer to a live cell.
+ V8_INLINE(void Clear()) { val_ = 0; }
+
+ /**
+ * A constructor that creates a new global cell pointing to that. In contrast
+ * to the copy constructor, this creates a new persistent handle which needs
+ * to be separately disposed.
+ */
+ template <class S> V8_INLINE(Persistent(Isolate* isolate, Handle<S> that))
+ : val_(*New(isolate, that)) { }
+
+#else
/**
* Creates an empty persistent handle that doesn't point to any
* storage cell.
*/
- V8_INLINE(Persistent());
+ V8_INLINE(Persistent()) : Handle<T>() { }
/**
* Creates a persistent handle for the same storage cell as the
@@ -424,6 +546,8 @@
template <class S> explicit V8_INLINE(Persistent(Handle<S> that))
: Handle<T>(*that) { }
+#endif
+
template <class S> V8_INLINE(static Persistent<T> Cast(Persistent<S> that)) {
#ifdef V8_ENABLE_CHECKS
// If we're going to perform the type check then we have to check
@@ -437,16 +561,38 @@
return Persistent<S>::Cast(*this);
}
- /** Deprecated. Use Isolate version instead. */
V8_DEPRECATED(static Persistent<T> New(Handle<T> that));
/**
* Creates a new persistent handle for an existing local or persistent handle.
*/
+ // TODO(dcarney): remove before cutover
V8_INLINE(static Persistent<T> New(Isolate* isolate, Handle<T> that));
+#ifndef V8_USE_UNSAFE_HANDLES
+ // TODO(dcarney): remove before cutover
+ V8_INLINE(static Persistent<T> New(Isolate* isolate, Persistent<T> that));
+#endif
- /** Deprecated. Use Isolate version instead. */
- V8_DEPRECATED(void Dispose());
+#ifndef V8_USE_UNSAFE_HANDLES
+ template <class S> V8_INLINE(
+ bool operator==(const Persistent<S>& that) const) {
+ internal::Object** a = reinterpret_cast<internal::Object**>(**this);
+ internal::Object** b = reinterpret_cast<internal::Object**>(*that);
+ if (a == 0) return b == 0;
+ if (b == 0) return false;
+ return *a == *b;
+ }
+
+ template <class S> V8_INLINE(bool operator==(const Handle<S> that) const) {
+ internal::Object** a = reinterpret_cast<internal::Object**>(**this);
+ internal::Object** b = reinterpret_cast<internal::Object**>(*that);
+ if (a == 0) return b == 0;
+ if (b == 0) return false;
+ return *a == *b;
+ }
+#endif
+
+ V8_INLINE(void Dispose());
/**
* Releases the storage cell referenced by this persistent handle.
@@ -454,11 +600,20 @@
* This handle's reference, and any other references to the storage
* cell remain and IsEmpty will still return false.
*/
+ // TODO(dcarney): remove before cutover
V8_INLINE(void Dispose(Isolate* isolate));
- /** Deprecated. Use Isolate version instead. */
- V8_DEPRECATED(void MakeWeak(void* parameters,
- WeakReferenceCallback callback));
+ template<typename S, typename P>
+ V8_INLINE(void MakeWeak(
+ Isolate* isolate,
+ P* parameters,
+ typename WeakReferenceCallbacks<S, P>::Revivable callback));
+
+ template<typename P>
+ V8_INLINE(void MakeWeak(
+ Isolate* isolate,
+ P* parameters,
+ typename WeakReferenceCallbacks<T, P>::Revivable callback));
/**
* Make the reference to this object weak. When only weak handles
@@ -466,18 +621,17 @@
* callback to the given V8::NearDeathCallback function, passing
* it the object reference and the given parameters.
*/
+ // TODO(dcarney): remove before cutover
V8_INLINE(void MakeWeak(Isolate* isolate,
void* parameters,
NearDeathCallback callback));
- /** Deprecated. Use Isolate version instead. */
- V8_DEPRECATED(void ClearWeak());
+ V8_INLINE(void ClearWeak());
- /** Clears the weak reference to this object. */
+ // TODO(dcarney): remove before cutover
V8_INLINE(void ClearWeak(Isolate* isolate));
- /** Deprecated. Use Isolate version instead. */
- V8_DEPRECATED(void MarkIndependent());
+ V8_INLINE(void MarkIndependent());
/**
* Marks the reference to this object independent. Garbage collector is free
@@ -485,10 +639,10 @@
* independent handle should not assume that it will be preceded by a global
* GC prologue callback or followed by a global GC epilogue callback.
*/
+ // TODO(dcarney): remove before cutover
V8_INLINE(void MarkIndependent(Isolate* isolate));
- /** Deprecated. Use Isolate version instead. */
- V8_DEPRECATED(void MarkPartiallyDependent());
+ V8_INLINE(void MarkPartiallyDependent());
/**
* Marks the reference to this object partially dependent. Partially dependent
@@ -498,47 +652,103 @@
* external dependencies. This mark is automatically cleared after each
* garbage collection.
*/
+ // TODO(dcarney): remove before cutover
V8_INLINE(void MarkPartiallyDependent(Isolate* isolate));
- /** Deprecated. Use Isolate version instead. */
- V8_DEPRECATED(bool IsIndependent() const);
+ V8_INLINE(bool IsIndependent() const);
- /** Returns true if this handle was previously marked as independent. */
+ // TODO(dcarney): remove before cutover
V8_INLINE(bool IsIndependent(Isolate* isolate) const);
- /** Deprecated. Use Isolate version instead. */
- V8_DEPRECATED(bool IsNearDeath() const);
+ V8_INLINE(bool IsNearDeath() const);
/** Checks if the handle holds the only reference to an object. */
+ // TODO(dcarney): remove before cutover
V8_INLINE(bool IsNearDeath(Isolate* isolate) const);
- /** Deprecated. Use Isolate version instead. */
- V8_DEPRECATED(bool IsWeak() const);
+ V8_INLINE(bool IsWeak() const);
/** Returns true if the handle's reference is weak. */
+ // TODO(dcarney): remove before cutover
V8_INLINE(bool IsWeak(Isolate* isolate) const);
- /** Deprecated. Use Isolate version instead. */
- V8_DEPRECATED(void SetWrapperClassId(uint16_t class_id));
+ V8_INLINE(void SetWrapperClassId(uint16_t class_id));
/**
* Assigns a wrapper class ID to the handle. See RetainedObjectInfo interface
* description in v8-profiler.h for details.
*/
+ // TODO(dcarney): remove before cutover
V8_INLINE(void SetWrapperClassId(Isolate* isolate, uint16_t class_id));
- /** Deprecated. Use Isolate version instead. */
- V8_DEPRECATED(uint16_t WrapperClassId() const);
+ V8_INLINE(uint16_t WrapperClassId() const);
/**
* Returns the class ID previously assigned to this handle or 0 if no class ID
* was previously assigned.
*/
+ // TODO(dcarney): remove before cutover
V8_INLINE(uint16_t WrapperClassId(Isolate* isolate) const);
+ /**
+ * Disposes the current contents of the handle and replaces it.
+ */
+ V8_INLINE(void Reset(Isolate* isolate, const Handle<T>& other));
+
+#ifndef V8_USE_UNSAFE_HANDLES
+
+#ifndef V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+
private:
+#endif
+ // TODO(dcarney): make unlinkable before cutover
+ V8_INLINE(Persistent(const Persistent& that)) : val_(that.val_) {}
+ // TODO(dcarney): make unlinkable before cutover
+ V8_INLINE(Persistent& operator=(const Persistent& that)) { // NOLINT
+ this->val_ = that.val_;
+ return *this;
+ }
+
+ public:
+#ifndef V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
+
+ private:
+#endif
+ // TODO(dcarney): remove before cutover
+ template <class S> V8_INLINE(Persistent(S* that)) : val_(that) { }
+ // TODO(dcarney): remove before cutover
+ template <class S> V8_INLINE(Persistent(Persistent<S> that))
+ : val_(*that) {
+ TYPE_CHECK(T, S);
+ }
+ // TODO(dcarney): remove before cutover
+ V8_INLINE(T* operator*() const) { return val_; }
+ public:
+#ifndef V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+
+ private:
+#endif
+ // TODO(dcarney): remove before cutover
+ V8_INLINE(T* operator->() const) { return val_; }
+ public:
+#endif
+
+ private:
+ template<class F>
+ friend class Handle;
+ template<class F>
+ friend class Local;
friend class ImplementationUtilities;
friend class ObjectTemplate;
+ friend class Context;
+ friend class InternalHandleHelper;
+ friend class LocalContext;
+
+ V8_INLINE(static Persistent<T> New(Isolate* isolate, T* that));
+
+#ifndef V8_USE_UNSAFE_HANDLES
+ T* val_;
+#endif
};
@@ -1108,6 +1318,73 @@
*/
bool IsRegExp() const;
+
+ /**
+ * Returns true if this value is an ArrayBuffer.
+ * This is an experimental feature.
+ */
+ bool IsArrayBuffer() const;
+
+ /**
+ * Returns true if this value is one of TypedArrays.
+ * This is an experimental feature.
+ */
+ bool IsTypedArray() const;
+
+ /**
+ * Returns true if this value is an Uint8Array.
+ * This is an experimental feature.
+ */
+ bool IsUint8Array() const;
+
+ /**
+ * Returns true if this value is an Uint8ClampedArray.
+ * This is an experimental feature.
+ */
+ bool IsUint8ClampedArray() const;
+
+ /**
+ * Returns true if this value is an Int8Array.
+ * This is an experimental feature.
+ */
+ bool IsInt8Array() const;
+
+ /**
+ * Returns true if this value is an Uint16Array.
+ * This is an experimental feature.
+ */
+ bool IsUint16Array() const;
+
+ /**
+ * Returns true if this value is an Int16Array.
+ * This is an experimental feature.
+ */
+ bool IsInt16Array() const;
+
+ /**
+ * Returns true if this value is an Uint32Array.
+ * This is an experimental feature.
+ */
+ bool IsUint32Array() const;
+
+ /**
+ * Returns true if this value is an Int32Array.
+ * This is an experimental feature.
+ */
+ bool IsInt32Array() const;
+
+ /**
+ * Returns true if this value is a Float32Array.
+ * This is an experimental feature.
+ */
+ bool IsFloat32Array() const;
+
+ /**
+ * Returns true if this value is a Float64Array.
+ * This is an experimental feature.
+ */
+ bool IsFloat64Array() const;
+
Local<Boolean> ToBoolean() const;
Local<Number> ToNumber() const;
Local<String> ToString() const;
@@ -1185,8 +1462,7 @@
/**
* This function is no longer useful.
*/
- // TODO(dcarney): deprecate
- V8_INLINE(bool MayContainNonAscii()) const { return true; }
+ V8_DEPRECATED(V8_INLINE(bool MayContainNonAscii()) const) { return true; }
/**
* Returns whether this string contains only one byte data.
@@ -1231,10 +1507,10 @@
int length = -1,
int options = NO_OPTIONS) const;
// ASCII characters.
- int WriteAscii(char* buffer,
- int start = 0,
- int length = -1,
- int options = NO_OPTIONS) const;
+ V8_DEPRECATED(int WriteAscii(char* buffer,
+ int start = 0,
+ int length = -1,
+ int options = NO_OPTIONS) const);
// One byte characters.
int WriteOneByte(uint8_t* buffer,
int start = 0,
@@ -2099,6 +2375,21 @@
/**
+ * An instance of Uint8ClampedArray constructor (ES6 draft 15.13.6).
+ * This API is experimental and may change significantly.
+ */
+class V8EXPORT Uint8ClampedArray : public TypedArray {
+ public:
+ static Local<Uint8ClampedArray> New(Handle<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE(static Uint8ClampedArray* Cast(Value* obj));
+
+ private:
+ Uint8ClampedArray();
+ static void CheckCast(Value* obj);
+};
+
+/**
* An instance of Int8Array constructor (ES6 draft 15.13.6).
* This API is experimental and may change significantly.
*/
@@ -3604,6 +3895,24 @@
/**
+ * Asserts that no action is performed that could cause a handle's value
+ * to be modified. Useful when otherwise unsafe handle operations need to
+ * be performed.
+ */
+class V8EXPORT AssertNoGCScope {
+#ifndef DEBUG
+ V8_INLINE(AssertNoGCScope(Isolate* isolate)) {}
+#else
+ AssertNoGCScope(Isolate* isolate);
+ ~AssertNoGCScope();
+ private:
+ Isolate* isolate_;
+ bool last_state_;
+#endif
+};
+
+
+/**
* Container class for static utility functions.
*/
class V8EXPORT V8 {
@@ -4073,10 +4382,11 @@
internal::Object** handle);
static void DisposeGlobal(internal::Isolate* isolate,
internal::Object** global_handle);
+ typedef WeakReferenceCallbacks<Value, void>::Revivable RevivableCallback;
static void MakeWeak(internal::Isolate* isolate,
internal::Object** global_handle,
void* data,
- WeakReferenceCallback weak_reference_callback,
+ RevivableCallback weak_reference_callback,
NearDeathCallback near_death_callback);
static void ClearWeak(internal::Isolate* isolate,
internal::Object** global_handle);
@@ -4420,7 +4730,16 @@
explicit V8_INLINE(Scope(Handle<Context> context)) : context_(context) {
context_->Enter();
}
+ V8_INLINE(Scope(Isolate* isolate, Persistent<Context>& context)) // NOLINT
+#ifndef V8_USE_UNSAFE_HANDLES
+ : context_(Handle<Context>::New(isolate, context)) {
+#else
+ : context_(Local<Context>::New(isolate, context)) {
+#endif
+ context_->Enter();
+ }
V8_INLINE(~Scope()) { context_->Exit(); }
+
private:
Handle<Context> context_;
};
@@ -4707,7 +5026,7 @@
static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
static const int kContextHeaderSize = 2 * kApiPointerSize;
- static const int kContextEmbedderDataIndex = 64;
+ static const int kContextEmbedderDataIndex = 65;
static const int kFullStringRepresentationMask = 0x07;
static const int kStringEncodingMask = 0x4;
static const int kExternalTwoByteRepresentationTag = 0x02;
@@ -4859,8 +5178,30 @@
template <class T>
Local<T> Local<T>::New(Isolate* isolate, Handle<T> that) {
- if (that.IsEmpty()) return Local<T>();
- T* that_ptr = *that;
+ return New(isolate, that.val_);
+}
+
+#ifndef V8_USE_UNSAFE_HANDLES
+template <class T>
+Local<T> Local<T>::New(Isolate* isolate, const Persistent<T>& that) {
+ return New(isolate, that.val_);
+}
+
+template <class T>
+Handle<T> Handle<T>::New(Isolate* isolate, T* that) {
+ if (that == NULL) return Handle<T>();
+ T* that_ptr = that;
+ internal::Object** p = reinterpret_cast<internal::Object**>(that_ptr);
+ return Handle<T>(reinterpret_cast<T*>(HandleScope::CreateHandle(
+ reinterpret_cast<internal::Isolate*>(isolate), *p)));
+}
+#endif
+
+
+template <class T>
+Local<T> Local<T>::New(Isolate* isolate, T* that) {
+ if (that == NULL) return Local<T>();
+ T* that_ptr = that;
internal::Object** p = reinterpret_cast<internal::Object**>(that_ptr);
return Local<T>(reinterpret_cast<T*>(HandleScope::CreateHandle(
reinterpret_cast<internal::Isolate*>(isolate), *p)));
@@ -4869,14 +5210,26 @@
template <class T>
Persistent<T> Persistent<T>::New(Handle<T> that) {
- return New(Isolate::GetCurrent(), that);
+ return New(Isolate::GetCurrent(), that.val_);
}
template <class T>
Persistent<T> Persistent<T>::New(Isolate* isolate, Handle<T> that) {
- if (that.IsEmpty()) return Persistent<T>();
- internal::Object** p = reinterpret_cast<internal::Object**>(*that);
+ return New(isolate, that.val_);
+}
+
+#ifndef V8_USE_UNSAFE_HANDLES
+template <class T>
+Persistent<T> Persistent<T>::New(Isolate* isolate, Persistent<T> that) {
+ return New(isolate, that.val_);
+}
+#endif
+
+template <class T>
+Persistent<T> Persistent<T>::New(Isolate* isolate, T* that) {
+ if (that == NULL) return Persistent<T>();
+ internal::Object** p = reinterpret_cast<internal::Object**>(that);
return Persistent<T>(reinterpret_cast<T*>(
V8::GlobalizeReference(reinterpret_cast<internal::Isolate*>(isolate),
p)));
@@ -4894,7 +5247,7 @@
typedef internal::Internals I;
if (this->IsEmpty()) return false;
if (!I::IsInitialized(isolate)) return false;
- return I::GetNodeFlag(reinterpret_cast<internal::Object**>(**this),
+ return I::GetNodeFlag(reinterpret_cast<internal::Object**>(this->val_),
I::kNodeIsIndependentShift);
}
@@ -4910,7 +5263,7 @@
typedef internal::Internals I;
if (this->IsEmpty()) return false;
if (!I::IsInitialized(isolate)) return false;
- return I::GetNodeState(reinterpret_cast<internal::Object**>(**this)) ==
+ return I::GetNodeState(reinterpret_cast<internal::Object**>(this->val_)) ==
I::kNodeStateIsNearDeathValue;
}
@@ -4926,7 +5279,7 @@
typedef internal::Internals I;
if (this->IsEmpty()) return false;
if (!I::IsInitialized(isolate)) return false;
- return I::GetNodeState(reinterpret_cast<internal::Object**>(**this)) ==
+ return I::GetNodeState(reinterpret_cast<internal::Object**>(this->val_)) ==
I::kNodeStateIsWeakValue;
}
@@ -4941,29 +5294,45 @@
void Persistent<T>::Dispose(Isolate* isolate) {
if (this->IsEmpty()) return;
V8::DisposeGlobal(reinterpret_cast<internal::Isolate*>(isolate),
- reinterpret_cast<internal::Object**>(**this));
+ reinterpret_cast<internal::Object**>(this->val_));
+#ifndef V8_USE_UNSAFE_HANDLES
+ val_ = 0;
+#endif
}
template <class T>
-Persistent<T>::Persistent() : Handle<T>() { }
-
-template <class T>
-void Persistent<T>::MakeWeak(void* parameters, WeakReferenceCallback callback) {
- Isolate* isolate = Isolate::GetCurrent();
+template <typename S, typename P>
+void Persistent<T>::MakeWeak(
+ Isolate* isolate,
+ P* parameters,
+ typename WeakReferenceCallbacks<S, P>::Revivable callback) {
+ TYPE_CHECK(S, T);
+ typedef typename WeakReferenceCallbacks<Value, void>::Revivable Revivable;
V8::MakeWeak(reinterpret_cast<internal::Isolate*>(isolate),
- reinterpret_cast<internal::Object**>(**this),
+ reinterpret_cast<internal::Object**>(this->val_),
parameters,
- callback,
+ reinterpret_cast<Revivable>(callback),
NULL);
}
+
+template <class T>
+template <typename P>
+void Persistent<T>::MakeWeak(
+ Isolate* isolate,
+ P* parameters,
+ typename WeakReferenceCallbacks<T, P>::Revivable callback) {
+ MakeWeak<T, P>(isolate, parameters, callback);
+}
+
+
template <class T>
void Persistent<T>::MakeWeak(Isolate* isolate,
void* parameters,
NearDeathCallback callback) {
V8::MakeWeak(reinterpret_cast<internal::Isolate*>(isolate),
- reinterpret_cast<internal::Object**>(**this),
+ reinterpret_cast<internal::Object**>(this->val_),
parameters,
NULL,
callback);
@@ -4977,7 +5346,7 @@
template <class T>
void Persistent<T>::ClearWeak(Isolate* isolate) {
V8::ClearWeak(reinterpret_cast<internal::Isolate*>(isolate),
- reinterpret_cast<internal::Object**>(**this));
+ reinterpret_cast<internal::Object**>(this->val_));
}
template <class T>
@@ -4990,7 +5359,7 @@
typedef internal::Internals I;
if (this->IsEmpty()) return;
if (!I::IsInitialized(isolate)) return;
- I::UpdateNodeFlag(reinterpret_cast<internal::Object**>(**this),
+ I::UpdateNodeFlag(reinterpret_cast<internal::Object**>(this->val_),
true,
I::kNodeIsIndependentShift);
}
@@ -5005,7 +5374,7 @@
typedef internal::Internals I;
if (this->IsEmpty()) return;
if (!I::IsInitialized(isolate)) return;
- I::UpdateNodeFlag(reinterpret_cast<internal::Object**>(**this),
+ I::UpdateNodeFlag(reinterpret_cast<internal::Object**>(this->val_),
true,
I::kNodeIsPartiallyDependentShift);
}
@@ -5016,11 +5385,27 @@
}
template <class T>
+void Persistent<T>::Reset(Isolate* isolate, const Handle<T>& other) {
+ Dispose(isolate);
+#ifdef V8_USE_UNSAFE_HANDLES
+ *this = *New(isolate, other);
+#else
+ if (other.IsEmpty()) {
+ this->val_ = NULL;
+ return;
+ }
+ internal::Object** p = reinterpret_cast<internal::Object**>(other.val_);
+ this->val_ = reinterpret_cast<T*>(
+ V8::GlobalizeReference(reinterpret_cast<internal::Isolate*>(isolate), p));
+#endif
+}
+
+template <class T>
void Persistent<T>::SetWrapperClassId(Isolate* isolate, uint16_t class_id) {
typedef internal::Internals I;
if (this->IsEmpty()) return;
if (!I::IsInitialized(isolate)) return;
- internal::Object** obj = reinterpret_cast<internal::Object**>(**this);
+ internal::Object** obj = reinterpret_cast<internal::Object**>(this->val_);
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
*reinterpret_cast<uint16_t*>(addr) = class_id;
}
@@ -5035,7 +5420,7 @@
typedef internal::Internals I;
if (this->IsEmpty()) return 0;
if (!I::IsInitialized(isolate)) return 0;
- internal::Object** obj = reinterpret_cast<internal::Object**>(**this);
+ internal::Object** obj = reinterpret_cast<internal::Object**>(this->val_);
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
return *reinterpret_cast<uint16_t*>(addr);
}
diff --git a/samples/lineprocessor.cc b/samples/lineprocessor.cc
index b5b6367..2ce31b4 100644
--- a/samples/lineprocessor.cc
+++ b/samples/lineprocessor.cc
@@ -25,6 +25,10 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+
#include <v8.h>
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -124,7 +128,9 @@
// "evaluate" command, because it must be executed some context.
// In our sample we have only one context, so there is nothing really to
// think about.
- v8::Context::Scope scope(debug_message_context);
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope handle_scope(isolate);
+ v8::Context::Scope scope(isolate, debug_message_context);
v8::Debug::ProcessDebugMessages();
}
@@ -136,8 +142,8 @@
v8::Isolate* isolate = v8::Isolate::GetCurrent();
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::String> script_source(NULL);
- v8::Handle<v8::Value> script_name(NULL);
+ v8::Handle<v8::String> script_source;
+ v8::Handle<v8::Value> script_name;
int script_param_counter = 0;
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -209,7 +215,7 @@
// Create a new execution environment containing the built-in
// functions
- v8::Handle<v8::Context> context = v8::Context::New(NULL, global);
+ v8::Handle<v8::Context> context = v8::Context::New(isolate, NULL, global);
// Enter the newly created execution environment.
v8::Context::Scope context_scope(context);
diff --git a/samples/process.cc b/samples/process.cc
index 4dcc09a..fd3a821 100644
--- a/samples/process.cc
+++ b/samples/process.cc
@@ -25,6 +25,11 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// TODO(dcarney): remove this
+#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+
#include <v8.h>
#include <string>
@@ -163,11 +168,11 @@
// is what we need for the reference to remain after we return from
// this method. That persistent handle has to be disposed in the
// destructor.
- context_ = Context::New(NULL, global);
+ context_.Reset(GetIsolate(), Context::New(GetIsolate(), NULL, global));
// Enter the new context so all the following operations take place
// within it.
- Context::Scope context_scope(context_);
+ Context::Scope context_scope(GetIsolate(), context_);
// Make the options mapping available within the context
if (!InstallMaps(opts, output))
@@ -250,7 +255,7 @@
// Enter this processor's context so all the remaining operations
// take place there
- Context::Scope context_scope(context_);
+ Context::Scope context_scope(GetIsolate(), context_);
// Wrap the C++ request object in a JavaScript wrapper
Handle<Object> request_obj = WrapRequest(request);
@@ -303,7 +308,8 @@
Handle<ObjectTemplate> raw_template = MakeMapTemplate(GetIsolate());
map_template_ = Persistent<ObjectTemplate>::New(GetIsolate(), raw_template);
}
- Handle<ObjectTemplate> templ = map_template_;
+ Handle<ObjectTemplate> templ =
+ Local<ObjectTemplate>::New(GetIsolate(), map_template_);
// Create an empty map wrapper.
Handle<Object> result = templ->NewInstance();
@@ -410,7 +416,8 @@
request_template_ =
Persistent<ObjectTemplate>::New(GetIsolate(), raw_template);
}
- Handle<ObjectTemplate> templ = request_template_;
+ Handle<ObjectTemplate> templ =
+ Local<ObjectTemplate>::New(GetIsolate(), request_template_);
// Create an empty http request wrapper.
Handle<Object> result = templ->NewInstance();
diff --git a/samples/shell.cc b/samples/shell.cc
index 0b71c2c..da18cc7 100644
--- a/samples/shell.cc
+++ b/samples/shell.cc
@@ -25,6 +25,11 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// TODO(dcarney): remove this
+#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+
#include <v8.h>
#include <assert.h>
#include <fcntl.h>
@@ -45,7 +50,7 @@
*/
-v8::Persistent<v8::Context> CreateShellContext();
+v8::Handle<v8::Context> CreateShellContext(v8::Isolate* isolate);
void RunShell(v8::Handle<v8::Context> context);
int RunMain(v8::Isolate* isolate, int argc, char* argv[]);
bool ExecuteString(v8::Isolate* isolate,
@@ -72,7 +77,7 @@
int result;
{
v8::HandleScope handle_scope(isolate);
- v8::Persistent<v8::Context> context = CreateShellContext();
+ v8::Handle<v8::Context> context = CreateShellContext(isolate);
if (context.IsEmpty()) {
fprintf(stderr, "Error creating context\n");
return 1;
@@ -81,7 +86,6 @@
result = RunMain(isolate, argc, argv);
if (run_shell) RunShell(context);
context->Exit();
- context.Dispose(isolate);
}
v8::V8::Dispose();
return result;
@@ -96,7 +100,7 @@
// Creates a new execution environment containing the built-in
// functions.
-v8::Persistent<v8::Context> CreateShellContext() {
+v8::Handle<v8::Context> CreateShellContext(v8::Isolate* isolate) {
// Create a template for the global object.
v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
// Bind the global 'print' function to the C++ Print callback.
@@ -110,7 +114,7 @@
// Bind the 'version' function
global->Set(v8::String::New("version"), v8::FunctionTemplate::New(Version));
- return v8::Context::New(NULL, global);
+ return v8::Context::New(isolate, NULL, global);
}
diff --git a/src/api.cc b/src/api.cc
index 4352ef3..8a6eaf4 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -25,6 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+
#include "api.h"
#include <string.h> // For memcpy, strlen.
@@ -625,7 +628,7 @@
void V8::MakeWeak(i::Isolate* isolate,
i::Object** object,
void* parameters,
- WeakReferenceCallback weak_reference_callback,
+ RevivableCallback weak_reference_callback,
NearDeathCallback near_death_callback) {
ASSERT(isolate == i::Isolate::Current());
LOG_API(isolate, "MakeWeak");
@@ -2409,6 +2412,46 @@
}
+bool Value::IsArrayBuffer() const {
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsArrayBuffer()"))
+ return false;
+ return Utils::OpenHandle(this)->IsJSArrayBuffer();
+}
+
+
+bool Value::IsTypedArray() const {
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsArrayBuffer()"))
+ return false;
+ return Utils::OpenHandle(this)->IsJSTypedArray();
+}
+
+
+#define TYPED_ARRAY_LIST(F) \
+F(Uint8Array, kExternalUnsignedByteArray) \
+F(Int8Array, kExternalByteArray) \
+F(Uint16Array, kExternalUnsignedShortArray) \
+F(Int16Array, kExternalShortArray) \
+F(Uint32Array, kExternalUnsignedIntArray) \
+F(Int32Array, kExternalIntArray) \
+F(Float32Array, kExternalFloatArray) \
+F(Float64Array, kExternalDoubleArray) \
+F(Uint8ClampedArray, kExternalPixelArray)
+
+
+#define VALUE_IS_TYPED_ARRAY(TypedArray, type_const) \
+ bool Value::Is##TypedArray() const { \
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::Is" #TypedArray "()")) \
+ return false; \
+ i::Handle<i::Object> obj = Utils::OpenHandle(this); \
+ if (!obj->IsJSTypedArray()) return false; \
+ return i::JSTypedArray::cast(*obj)->type() == type_const; \
+ }
+
+TYPED_ARRAY_LIST(VALUE_IS_TYPED_ARRAY)
+
+#undef VALUE_IS_TYPED_ARRAY
+
+
bool Value::IsObject() const {
if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsObject()")) return false;
return Utils::OpenHandle(this)->IsJSObject();
@@ -2776,14 +2819,7 @@
}
-CHECK_TYPED_ARRAY_CAST(Uint8Array, kExternalUnsignedByteArray)
-CHECK_TYPED_ARRAY_CAST(Int8Array, kExternalByteArray)
-CHECK_TYPED_ARRAY_CAST(Uint16Array, kExternalUnsignedShortArray)
-CHECK_TYPED_ARRAY_CAST(Int16Array, kExternalShortArray)
-CHECK_TYPED_ARRAY_CAST(Uint32Array, kExternalUnsignedIntArray)
-CHECK_TYPED_ARRAY_CAST(Int32Array, kExternalIntArray)
-CHECK_TYPED_ARRAY_CAST(Float32Array, kExternalFloatArray)
-CHECK_TYPED_ARRAY_CAST(Float64Array, kExternalDoubleArray)
+TYPED_ARRAY_LIST(CHECK_TYPED_ARRAY_CAST)
#undef CHECK_TYPED_ARRAY_CAST
@@ -3314,7 +3350,7 @@
const char* postfix = "]";
int prefix_len = i::StrLength(prefix);
- int str_len = str->Length();
+ int str_len = str->Utf8Length();
int postfix_len = i::StrLength(postfix);
int buf_len = prefix_len + str_len + postfix_len;
@@ -3326,7 +3362,7 @@
ptr += prefix_len;
// Write real content.
- str->WriteAscii(ptr, 0, str_len);
+ str->WriteUtf8(ptr, str_len);
ptr += str_len;
// Write postfix.
@@ -5944,6 +5980,8 @@
TYPED_ARRAY_NEW(Uint8Array, uint8_t, kExternalUnsignedByteArray,
i::EXTERNAL_UNSIGNED_BYTE_ELEMENTS)
+TYPED_ARRAY_NEW(Uint8ClampedArray, uint8_t, kExternalPixelArray,
+ i::EXTERNAL_PIXEL_ELEMENTS)
TYPED_ARRAY_NEW(Int8Array, int8_t, kExternalByteArray,
i::EXTERNAL_BYTE_ELEMENTS)
TYPED_ARRAY_NEW(Uint16Array, uint16_t, kExternalUnsignedShortArray,
@@ -6039,6 +6077,19 @@
}
+#ifdef DEBUG
+v8::AssertNoGCScope::AssertNoGCScope(v8::Isolate* isolate)
+ : isolate_(isolate),
+ last_state_(i::EnterAllocationScope(
+ reinterpret_cast<i::Isolate*>(isolate), false)) {
+}
+
+v8::AssertNoGCScope::~AssertNoGCScope() {
+ i::ExitAllocationScope(reinterpret_cast<i::Isolate*>(isolate_), last_state_);
+}
+#endif
+
+
void V8::IgnoreOutOfMemoryException() {
EnterIsolateIfNeeded()->set_ignore_out_of_memory(true);
}
@@ -6451,9 +6502,10 @@
TryCatch try_catch;
Handle<String> str = obj->ToString();
if (str.IsEmpty()) return;
- length_ = str->Length();
+ length_ = str->Utf8Length();
str_ = i::NewArray<char>(length_ + 1);
- str->WriteAscii(str_);
+ str->WriteUtf8(str_);
+ ASSERT(i::String::NonAsciiStart(str_, length_) >= length_);
}
diff --git a/src/api.h b/src/api.h
index a956346..686abf7 100644
--- a/src/api.h
+++ b/src/api.h
@@ -173,6 +173,7 @@
V(ArrayBuffer, JSArrayBuffer) \
V(TypedArray, JSTypedArray) \
V(Uint8Array, JSTypedArray) \
+ V(Uint8ClampedArray, JSTypedArray) \
V(Int8Array, JSTypedArray) \
V(Uint16Array, JSTypedArray) \
V(Int16Array, JSTypedArray) \
@@ -222,6 +223,8 @@
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Uint8Array> ToLocalUint8Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Uint8ClampedArray> ToLocalUint8ClampedArray(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Int8Array> ToLocalInt8Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Uint16Array> ToLocalUint16Array(
@@ -291,12 +294,21 @@
}
+class InternalHandleHelper {
+ public:
+ template<class From, class To>
+ static inline Local<To> Convert(v8::internal::Handle<From> obj) {
+ return Local<To>(reinterpret_cast<To*>(obj.location()));
+ }
+};
+
+
// Implementations of ToLocal
#define MAKE_TO_LOCAL(Name, From, To) \
Local<v8::To> Utils::Name(v8::internal::Handle<v8::internal::From> obj) { \
ASSERT(obj.is_null() || !obj->IsTheHole()); \
- return Local<To>(reinterpret_cast<To*>(obj.location())); \
+ return InternalHandleHelper::Convert<v8::internal::From, v8::To>(obj); \
}
@@ -305,8 +317,8 @@
v8::internal::Handle<v8::internal::JSTypedArray> obj) { \
ASSERT(obj.is_null() || !obj->IsTheHole()); \
ASSERT(obj->type() == typeConst); \
- return Local<v8::TypedArray>( \
- reinterpret_cast<v8::TypedArray*>(obj.location())); \
+ return InternalHandleHelper:: \
+ Convert<v8::internal::JSTypedArray, v8::TypedArray>(obj); \
}
@@ -322,6 +334,7 @@
MAKE_TO_LOCAL(ToLocal, JSTypedArray, TypedArray)
MAKE_TO_LOCAL_TYPED_ARRAY(Uint8Array, kExternalUnsignedByteArray)
+MAKE_TO_LOCAL_TYPED_ARRAY(Uint8ClampedArray, kExternalPixelArray)
MAKE_TO_LOCAL_TYPED_ARRAY(Int8Array, kExternalByteArray)
MAKE_TO_LOCAL_TYPED_ARRAY(Uint16Array, kExternalUnsignedShortArray)
MAKE_TO_LOCAL_TYPED_ARRAY(Int16Array, kExternalShortArray)
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 7b7fae3..86da76a 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -74,6 +74,28 @@
}
+void LoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r0 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r1 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -116,9 +138,10 @@
int constant_stack_parameter_count) {
// register state
// r0 -- number of arguments
+ // r1 -- function
// r2 -- type info cell with elements kind
- static Register registers[] = { r2 };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { r1, r2 };
+ descriptor->register_param_count_ = 2;
if (constant_stack_parameter_count != 0) {
// stack param count needs (constructor pointer, and single argument)
descriptor->stack_parameter_count_ = &r0;
@@ -3777,12 +3800,6 @@
Register InstanceofStub::right() { return r1; }
-void LoadFieldStub::Generate(MacroAssembler* masm) {
- StubCompiler::DoGenerateFastPropertyLoad(masm, r0, reg_, inobject_, index_);
- __ Ret();
-}
-
-
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
@@ -4734,6 +4751,7 @@
Handle<Object> terminal_kind_sentinel =
TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
LAST_FAST_ELEMENTS_KIND);
+ __ JumpIfNotSmi(r3, &miss);
__ cmp(r3, Operand(terminal_kind_sentinel));
__ b(gt, &miss);
// Make sure the function is the Array() function
@@ -5942,8 +5960,36 @@
__ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime);
__ bind(&allocated);
// Fill the fields of the cons string.
+ Label skip_write_barrier, after_writing;
+ ExternalReference high_promotion_mode = ExternalReference::
+ new_space_high_promotion_mode_active_address(masm->isolate());
+ __ mov(r4, Operand(high_promotion_mode));
+ __ ldr(r4, MemOperand(r4, 0));
+ __ cmp(r4, Operand::Zero());
+ __ b(eq, &skip_write_barrier);
+
+ __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
+ __ RecordWriteField(r7,
+ ConsString::kFirstOffset,
+ r0,
+ r4,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+ __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
+ __ RecordWriteField(r7,
+ ConsString::kSecondOffset,
+ r1,
+ r4,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+ __ jmp(&after_writing);
+
+ __ bind(&skip_write_barrier);
__ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
__ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
+
+ __ bind(&after_writing);
+
__ mov(r0, Operand(r7));
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
@@ -6789,6 +6835,9 @@
{ REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
// FastNewClosureStub::Generate
{ REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET },
+ // StringAddStub::Generate
+ { REG(r7), REG(r1), REG(r4), EMIT_REMEMBERED_SET },
+ { REG(r7), REG(r0), REG(r4), EMIT_REMEMBERED_SET },
// Null termination.
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
@@ -7313,14 +7362,8 @@
// Get the elements kind and case on that.
__ cmp(r2, Operand(undefined_sentinel));
__ b(eq, &no_info);
- __ ldr(r3, FieldMemOperand(r2, kPointerSize));
-
- // There is no info if the call site went megamorphic either
- // TODO(mvstanton): Really? I thought if it was the array function that
- // the cell wouldn't get stamped as megamorphic.
- __ cmp(r3,
- Operand(TypeFeedbackCells::MegamorphicSentinel(masm->isolate())));
- __ b(eq, &no_info);
+ __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ __ JumpIfNotSmi(r3, &no_info);
__ SmiUntag(r3);
__ jmp(&switch_ready);
__ bind(&no_info);
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 0ef4be0..0bc1f48 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -1593,7 +1593,8 @@
: ObjectLiteral::kNoFlags;
__ mov(r0, Operand(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
- if (expr->depth() > 1) {
+ if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
+ expr->depth() > 1) {
__ Push(r3, r2, r1, r0);
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
@@ -1939,11 +1940,12 @@
Label resume;
__ CompareRoot(result_register(), Heap::kTheHoleValueRootIndex);
__ b(ne, &resume);
- __ pop(result_register());
if (expr->yield_kind() == Yield::SUSPEND) {
- // TODO(wingo): Box into { value: VALUE, done: false }.
+ EmitReturnIteratorResult(false);
+ } else {
+ __ pop(result_register());
+ EmitReturnSequence();
}
- EmitReturnSequence();
__ bind(&resume);
context()->Plug(result_register());
@@ -1955,18 +1957,7 @@
__ mov(r1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
__ str(r1, FieldMemOperand(result_register(),
JSGeneratorObject::kContinuationOffset));
- __ pop(result_register());
- // TODO(wingo): Box into { value: VALUE, done: true }.
-
- // Exit all nested statements.
- NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
- int context_length = 0;
- while (current != NULL) {
- current = current->Exit(&stack_depth, &context_length);
- }
- __ Drop(stack_depth);
- EmitReturnSequence();
+ EmitReturnIteratorResult(true);
break;
}
@@ -2074,6 +2065,55 @@
}
+void FullCodeGenerator::EmitReturnIteratorResult(bool done) {
+ Label gc_required;
+ Label allocated;
+
+ Handle<Map> map(isolate()->native_context()->generator_result_map());
+
+ __ Allocate(map->instance_size(), r0, r2, r3, &gc_required, TAG_OBJECT);
+
+ __ bind(&allocated);
+ __ mov(r1, Operand(map));
+ __ pop(r2);
+ __ mov(r3, Operand(isolate()->factory()->ToBoolean(done)));
+ __ mov(r4, Operand(isolate()->factory()->empty_fixed_array()));
+ ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
+ __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+ __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
+ __ str(r2,
+ FieldMemOperand(r0, JSGeneratorObject::kResultValuePropertyOffset));
+ __ str(r3,
+ FieldMemOperand(r0, JSGeneratorObject::kResultDonePropertyOffset));
+
+ // Only the value field needs a write barrier, as the other values are in the
+ // root set.
+ __ RecordWriteField(r0, JSGeneratorObject::kResultValuePropertyOffset,
+ r2, r3, kLRHasBeenSaved, kDontSaveFPRegs);
+
+ if (done) {
+ // Exit all nested statements.
+ NestedStatement* current = nesting_stack_;
+ int stack_depth = 0;
+ int context_length = 0;
+ while (current != NULL) {
+ current = current->Exit(&stack_depth, &context_length);
+ }
+ __ Drop(stack_depth);
+ }
+
+ EmitReturnSequence();
+
+ __ bind(&gc_required);
+ __ Push(Smi::FromInt(map->instance_size()));
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ ldr(context_register(),
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ jmp(&allocated);
+}
+
+
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 893ac4e..c644be5 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -1180,6 +1180,25 @@
}
+void StoreIC::GenerateSlow(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- r0 : value
+ // -- r2 : key
+ // -- r1 : receiver
+ // -- lr : return address
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ __ Push(r1, r2, r0);
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r0 : value
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 7e81ca6..3fe46ff 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -677,7 +677,7 @@
int vreg = allocator_->GetVirtualRegister();
if (!allocator_->AllocationOk()) {
Abort("Out of virtual registers while trying to allocate temp register.");
- return NULL;
+ vreg = 0;
}
operand->set_virtual_register(vreg);
return operand;
@@ -1305,8 +1305,8 @@
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineAsRegister(new(zone()) LBitI(left, right));
} else {
ASSERT(instr->representation().IsTagged());
@@ -1484,15 +1484,15 @@
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left;
- LOperand* right = UseOrConstant(instr->MostConstantOperand());
+ LOperand* right = UseOrConstant(instr->BetterRightOperand());
LOperand* temp = NULL;
if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
(instr->CheckFlag(HValue::kCanOverflow) ||
!right->IsConstantOperand())) {
- left = UseRegister(instr->LeastConstantOperand());
+ left = UseRegister(instr->BetterLeftOperand());
temp = TempRegister();
} else {
- left = UseRegisterAtStart(instr->LeastConstantOperand());
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
}
LMulI* mul = new(zone()) LMulI(left, right, temp);
if (instr->CheckFlag(HValue::kCanOverflow) ||
@@ -1602,8 +1602,8 @@
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
LAddI* add = new(zone()) LAddI(left, right);
LInstruction* result = DefineAsRegister(add);
if (instr->CheckFlag(HValue::kCanOverflow)) {
@@ -1634,8 +1634,8 @@
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- left = UseRegisterAtStart(instr->LeastConstantOperand());
- right = UseOrConstantAtStart(instr->MostConstantOperand());
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
+ right = UseOrConstantAtStart(instr->BetterRightOperand());
} else {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
@@ -2155,12 +2155,6 @@
}
-LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadElements(input));
-}
-
-
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
@@ -2324,17 +2318,22 @@
: UseRegisterAtStart(instr->object());
}
- LOperand* val =
- needs_write_barrier ||
- (FLAG_track_fields && instr->field_representation().IsSmi())
- ? UseTempRegister(instr->value()) : UseRegister(instr->value());
+ LOperand* val;
+ if (needs_write_barrier ||
+ (FLAG_track_fields && instr->field_representation().IsSmi())) {
+ val = UseTempRegister(instr->value());
+ } else if (FLAG_track_double_fields &&
+ instr->field_representation().IsDouble()) {
+ val = UseRegisterAtStart(instr->value());
+ } else {
+ val = UseRegister(instr->value());
+ }
// We need a temporary register for write barrier of the map field.
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
- if ((FLAG_track_fields && instr->field_representation().IsSmi()) ||
- (FLAG_track_double_fields && instr->field_representation().IsDouble())) {
+ if (FLAG_track_fields && instr->field_representation().IsSmi()) {
return AssignEnvironment(result);
}
return result;
@@ -2453,7 +2452,7 @@
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedIndex) {
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
Abort("Too many spill slots needed for OSR");
spill_index = 0;
}
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 11675e9..116d576 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -122,7 +122,6 @@
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
- V(LoadElements) \
V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
@@ -1574,18 +1573,6 @@
};
-class LLoadElements: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadElements(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
-};
-
-
class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadExternalArrayPointer(LOperand* object) {
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index dfacf4c..3a0f476 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -3057,31 +3057,20 @@
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+ int offset = instr->hydrogen()->offset();
Register object = ToRegister(instr->object());
- if (!FLAG_track_double_fields) {
- ASSERT(!instr->hydrogen()->representation().IsDouble());
- }
- Register temp = instr->hydrogen()->representation().IsDouble()
- ? scratch0() : ToRegister(instr->result());
- if (instr->hydrogen()->is_in_object()) {
- __ ldr(temp, FieldMemOperand(object, instr->hydrogen()->offset()));
- } else {
- __ ldr(temp, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ ldr(temp, FieldMemOperand(temp, instr->hydrogen()->offset()));
+ if (instr->hydrogen()->representation().IsDouble()) {
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ __ vldr(result, FieldMemOperand(object, offset));
+ return;
}
- if (instr->hydrogen()->representation().IsDouble()) {
- Label load_from_heap_number, done;
- DwVfpRegister result = ToDoubleRegister(instr->result());
- SwVfpRegister flt_scratch = double_scratch0().low();
- __ JumpIfNotSmi(temp, &load_from_heap_number);
- __ SmiUntag(temp);
- __ vmov(flt_scratch, temp);
- __ vcvt_f64_s32(result, flt_scratch);
- __ b(&done);
- __ bind(&load_from_heap_number);
- __ vldr(result, FieldMemOperand(temp, HeapNumber::kValueOffset));
- __ bind(&done);
+ Register result = ToRegister(instr->result());
+ if (instr->hydrogen()->is_in_object()) {
+ __ ldr(result, FieldMemOperand(object, offset));
+ } else {
+ __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ __ ldr(result, FieldMemOperand(result, offset));
}
}
@@ -3227,40 +3216,6 @@
}
-void LCodeGen::DoLoadElements(LLoadElements* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->object());
- Register scratch = scratch0();
-
- __ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- Label done, fail;
- __ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(scratch, ip);
- __ b(eq, &done);
- __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
- __ cmp(scratch, ip);
- __ b(eq, &done);
- // |scratch| still contains |input|'s map.
- __ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
- __ ubfx(scratch, scratch, Map::kElementsKindShift,
- Map::kElementsKindBitCount);
- __ cmp(scratch, Operand(GetInitialFastElementsKind()));
- __ b(lt, &fail);
- __ cmp(scratch, Operand(TERMINAL_FAST_ELEMENTS_KIND));
- __ b(le, &done);
- __ cmp(scratch, Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ b(lt, &fail);
- __ cmp(scratch, Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ b(le, &done);
- __ bind(&fail);
- __ Abort("Check for fast or external elements failed.");
- __ bind(&done);
- }
-}
-
-
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register to_reg = ToRegister(instr->result());
@@ -4233,8 +4188,7 @@
__ mov(r0, Operand(instr->arity()));
__ mov(r2, Operand(instr->hydrogen()->property_cell()));
- Object* cell_value = instr->hydrogen()->property_cell()->value();
- ElementsKind kind = static_cast<ElementsKind>(Smi::cast(cell_value)->value());
+ ElementsKind kind = instr->hydrogen()->elements_kind();
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(kind);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
@@ -4264,29 +4218,26 @@
Representation representation = instr->representation();
Register object = ToRegister(instr->object());
- Register value = ToRegister(instr->value());
- ASSERT(!object.is(value));
Register scratch = scratch0();
int offset = instr->offset();
+ Handle<Map> transition = instr->transition();
+
if (FLAG_track_fields && representation.IsSmi()) {
+ Register value = ToRegister(instr->value());
__ SmiTag(value, value, SetCC);
if (!instr->hydrogen()->value()->range()->IsInSmiRange()) {
DeoptimizeIf(vs, instr->environment());
}
- } else if (FLAG_track_double_fields && representation.IsDouble() &&
- !instr->hydrogen()->value()->type().IsSmi() &&
- !instr->hydrogen()->value()->type().IsHeapNumber()) {
- Label do_store;
- __ JumpIfSmi(value, &do_store);
- Handle<Map> map(isolate()->factory()->heap_number_map());
-
- __ ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- DoCheckMapCommon(scratch, map, REQUIRE_EXACT_MAP, instr->environment());
- __ bind(&do_store);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ ASSERT(transition.is_null());
+ ASSERT(instr->is_in_object());
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ DwVfpRegister value = ToDoubleRegister(instr->value());
+ __ vstr(value, FieldMemOperand(object, offset));
+ return;
}
- Handle<Map> transition = instr->transition();
if (!transition.is_null()) {
if (transition->CanBeDeprecated()) {
transition_maps_.Add(transition, info()->zone());
@@ -4308,6 +4259,8 @@
}
// Do the store.
+ Register value = ToRegister(instr->value());
+ ASSERT(!object.is(value));
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
@@ -5159,6 +5112,8 @@
} else {
mode = NUMBER_CANDIDATE_IS_SMI;
}
+ } else {
+ mode = NUMBER_CANDIDATE_IS_SMI;
}
}
@@ -5594,7 +5549,8 @@
// Pick the right runtime function or stub to call.
int properties_count = instr->hydrogen()->constant_properties_length() / 2;
- if (instr->hydrogen()->depth() > 1) {
+ if ((FLAG_track_double_fields && instr->hydrogen()->may_store_doubles()) ||
+ instr->hydrogen()->depth() > 1) {
__ Push(r3, r2, r1, r0);
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
} else if (flags != ObjectLiteral::kFastElements ||
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index bae5060..6e0b4a7 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1933,8 +1933,34 @@
Register scratch1,
Register scratch2,
Label* gc_required) {
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ Label allocate_new_space, install_map;
+ AllocationFlags flags = TAG_OBJECT;
+
+ ExternalReference high_promotion_mode = ExternalReference::
+ new_space_high_promotion_mode_active_address(isolate());
+ mov(scratch1, Operand(high_promotion_mode));
+ ldr(scratch1, MemOperand(scratch1, 0));
+ cmp(scratch1, Operand::Zero());
+ b(eq, &allocate_new_space);
+
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
+
+ jmp(&install_map);
+
+ bind(&allocate_new_space);
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ flags);
+
+ bind(&install_map);
InitializeNewString(result,
length,
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 14aa898..127bf3f 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -315,11 +315,13 @@
}
-void StubCompiler::DoGenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- bool inobject,
- int index) {
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+ Register dst,
+ Register src,
+ bool inobject,
+ int index,
+ Representation representation) {
+ ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
@@ -451,8 +453,10 @@
Register value_reg,
Register scratch1,
Register scratch2,
+ Register scratch3,
Label* miss_label,
- Label* miss_restore_name) {
+ Label* miss_restore_name,
+ Label* slow) {
// r0 : value
Label exit;
@@ -474,16 +478,6 @@
// Ensure no transitions to deprecated maps are followed.
__ CheckMapDeprecated(transition, scratch1, miss_label);
- if (FLAG_track_fields && representation.IsSmi()) {
- __ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
- Label do_store;
- __ JumpIfSmi(value_reg, &do_store);
- __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
- miss_label, DONT_DO_SMI_CHECK);
- __ bind(&do_store);
- }
-
// Check that we are allowed to write this.
if (object->GetPrototype()->IsJSObject()) {
JSObject* holder;
@@ -499,7 +493,7 @@
}
Register holder_reg = CheckPrototypes(
object, receiver_reg, Handle<JSObject>(holder), name_reg,
- scratch1, scratch2, name, miss_restore_name);
+ scratch1, scratch2, name, miss_restore_name, SKIP_RECEIVER);
// If no property was found, and the holder (the last object in the
// prototype chain) is in slow mode, we need to do a negative lookup on the
// holder.
@@ -518,6 +512,30 @@
}
}
+ Register storage_reg = name_reg;
+
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_restore_name);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ Label do_store, heap_number;
+ __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow);
+
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiUntag(scratch1, value_reg);
+ __ vmov(s0, scratch1);
+ __ vcvt_f64_s32(d0, s0);
+ __ jmp(&do_store);
+
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
+ miss_restore_name, DONT_DO_SMI_CHECK);
+ __ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ bind(&do_store);
+ __ vstr(d0, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
+ }
+
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
@@ -546,7 +564,7 @@
__ RecordWriteField(receiver_reg,
HeapObject::kMapOffset,
scratch1,
- name_reg,
+ scratch2,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
@@ -564,7 +582,11 @@
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- __ str(value_reg, FieldMemOperand(receiver_reg, offset));
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ __ str(storage_reg, FieldMemOperand(receiver_reg, offset));
+ } else {
+ __ str(value_reg, FieldMemOperand(receiver_reg, offset));
+ }
if (!FLAG_track_fields || !representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
@@ -572,7 +594,11 @@
// Update the write barrier for the array address.
// Pass the now unused name_reg as a scratch register.
- __ mov(name_reg, value_reg);
+ if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ __ mov(name_reg, value_reg);
+ } else {
+ ASSERT(storage_reg.is(name_reg));
+ }
__ RecordWriteField(receiver_reg,
offset,
name_reg,
@@ -586,7 +612,11 @@
// Get the properties array
__ ldr(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ str(value_reg, FieldMemOperand(scratch1, offset));
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ __ str(storage_reg, FieldMemOperand(scratch1, offset));
+ } else {
+ __ str(value_reg, FieldMemOperand(scratch1, offset));
+ }
if (!FLAG_track_fields || !representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
@@ -594,7 +624,11 @@
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
- __ mov(name_reg, value_reg);
+ if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ __ mov(name_reg, value_reg);
+ } else {
+ ASSERT(storage_reg.is(name_reg));
+ }
__ RecordWriteField(scratch1,
offset,
name_reg,
@@ -652,11 +686,36 @@
if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
- Label do_store;
- __ JumpIfSmi(value_reg, &do_store);
- __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
+ // Load the double storage.
+ if (index < 0) {
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ ldr(scratch1, FieldMemOperand(receiver_reg, offset));
+ } else {
+ __ ldr(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ __ ldr(scratch1, FieldMemOperand(scratch1, offset));
+ }
+
+ // Store the value into the storage.
+ Label do_store, heap_number;
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiUntag(scratch2, value_reg);
+ __ vmov(s0, scratch2);
+ __ vcvt_f64_s32(d0, s0);
+ __ jmp(&do_store);
+
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex,
miss_label, DONT_DO_SMI_CHECK);
+ __ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
__ bind(&do_store);
+ __ vstr(d0, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
+ // Return the value (register r0).
+ ASSERT(value_reg.is(r0));
+ __ Ret();
+ return;
}
// TODO(verwaest): Share this code as a code stub.
@@ -1309,9 +1368,20 @@
void BaseLoadStubCompiler::GenerateLoadField(Register reg,
Handle<JSObject> holder,
- PropertyIndex index) {
- GenerateFastPropertyLoad(masm(), r0, reg, holder, index);
- __ Ret();
+ PropertyIndex field,
+ Representation representation) {
+ if (!reg.is(receiver())) __ mov(receiver(), reg);
+ if (kind() == Code::LOAD_IC) {
+ LoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ } else {
+ KeyedLoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ }
}
@@ -1535,7 +1605,8 @@
// Do the right check and compute the holder register.
Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss);
- GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
+ GenerateFastPropertyLoad(masm(), r1, reg, index.is_inobject(holder),
+ index.translate(holder), Representation::Tagged());
GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
diff --git a/src/arraybuffer.js b/src/arraybuffer.js
new file mode 100644
index 0000000..2b0c3dd
--- /dev/null
+++ b/src/arraybuffer.js
@@ -0,0 +1,100 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"use strict";
+
+var $ArrayBuffer = global.ArrayBuffer;
+
+// -------------------------------------------------------------------
+
+function ArrayBufferConstructor(byteLength) { // length = 1
+ if (%_IsConstructCall()) {
+ var l = TO_POSITIVE_INTEGER(byteLength);
+ %ArrayBufferInitialize(this, l);
+ } else {
+ return new $ArrayBuffer(byteLength);
+ }
+}
+
+function ArrayBufferGetByteLength() {
+ if (!IS_ARRAYBUFFER(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['ArrayBuffer.prototype.byteLength', this]);
+ }
+ return %ArrayBufferGetByteLength(this);
+}
+
+// ES6 Draft 15.13.5.5.3
+function ArrayBufferSlice(start, end) {
+ if (!IS_ARRAYBUFFER(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['ArrayBuffer.prototype.slice', this]);
+ }
+
+ var relativeStart = TO_INTEGER(start);
+ var first;
+ if (relativeStart < 0) {
+ first = MathMax(this.byteLength + relativeStart, 0);
+ } else {
+ first = MathMin(relativeStart, this.byteLength);
+ }
+ var relativeEnd = IS_UNDEFINED(end) ? this.byteLength : TO_INTEGER(end);
+ var fin;
+ if (relativeEnd < 0) {
+ fin = MathMax(this.byteLength + relativeEnd, 0);
+ } else {
+ fin = MathMin(relativeEnd, this.byteLength);
+ }
+
+ var newLen = fin - first;
+ // TODO(dslomov): implement inheritance
+ var result = new $ArrayBuffer(newLen);
+
+ %ArrayBufferSliceImpl(this, result, first);
+ return result;
+}
+
+function SetUpArrayBuffer() {
+ %CheckIsBootstrapping();
+
+ // Set up the ArrayBuffer constructor function.
+ %SetCode($ArrayBuffer, ArrayBufferConstructor);
+ %FunctionSetPrototype($ArrayBuffer, new $Object());
+
+ // Set up the constructor property on the ArrayBuffer prototype object.
+ %SetProperty($ArrayBuffer.prototype, "constructor", $ArrayBuffer, DONT_ENUM);
+
+ InstallGetter($ArrayBuffer.prototype, "byteLength", ArrayBufferGetByteLength);
+
+ InstallFunctions($ArrayBuffer.prototype, DONT_ENUM, $Array(
+ "slice", ArrayBufferSlice
+ ));
+}
+
+SetUpArrayBuffer();
+
+
diff --git a/src/assembler.cc b/src/assembler.cc
index fff588a..6b0c4b8 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -1203,6 +1203,13 @@
}
+ExternalReference ExternalReference::
+ new_space_high_promotion_mode_active_address(Isolate* isolate) {
+ return ExternalReference(
+ isolate->heap()->NewSpaceHighPromotionModeActiveAddress());
+}
+
+
ExternalReference ExternalReference::handle_scope_level_address(
Isolate* isolate) {
return ExternalReference(HandleScope::current_level_address(isolate));
diff --git a/src/assembler.h b/src/assembler.h
index 32424cf..6abd5c5 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -757,6 +757,8 @@
Isolate* isolate);
static ExternalReference old_data_space_allocation_limit_address(
Isolate* isolate);
+ static ExternalReference new_space_high_promotion_mode_active_address(
+ Isolate* isolate);
static ExternalReference double_fp_operation(Token::Value operation,
Isolate* isolate);
diff --git a/src/ast.h b/src/ast.h
index 10ae7de..9ffb00d 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -277,6 +277,14 @@
bool is_empty() const { return list_.is_empty(); }
int length() const { return list_.length(); }
+ void AddMapIfMissing(Handle<Map> map, Zone* zone) {
+ map = Map::CurrentMapForDeprecated(map);
+ for (int i = 0; i < length(); ++i) {
+ if (at(i).is_identical_to(map)) return;
+ }
+ Add(map, zone);
+ }
+
void Add(Handle<Map> handle, Zone* zone) {
list_.Add(handle.location(), zone);
}
@@ -1324,10 +1332,9 @@
return constant_properties_;
}
ZoneList<Property*>* properties() const { return properties_; }
-
bool fast_elements() const { return fast_elements_; }
-
- bool has_function() { return has_function_; }
+ bool may_store_doubles() const { return may_store_doubles_; }
+ bool has_function() const { return has_function_; }
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
@@ -1354,17 +1361,20 @@
bool is_simple,
bool fast_elements,
int depth,
+ bool may_store_doubles,
bool has_function)
: MaterializedLiteral(isolate, literal_index, is_simple, depth),
constant_properties_(constant_properties),
properties_(properties),
fast_elements_(fast_elements),
+ may_store_doubles_(may_store_doubles),
has_function_(has_function) {}
private:
Handle<FixedArray> constant_properties_;
ZoneList<Property*>* properties_;
bool fast_elements_;
+ bool may_store_doubles_;
bool has_function_;
};
@@ -2849,10 +2859,11 @@
bool is_simple,
bool fast_elements,
int depth,
+ bool may_store_doubles,
bool has_function) {
ObjectLiteral* lit = new(zone_) ObjectLiteral(
isolate_, constant_properties, properties, literal_index,
- is_simple, fast_elements, depth, has_function);
+ is_simple, fast_elements, depth, may_store_doubles, has_function);
VISIT_AND_RETURN(ObjectLiteral, lit)
}
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 16567b5..b0d3a5e 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -1317,34 +1317,36 @@
}
}
+ if (FLAG_harmony_array_buffer) {
+ // -- A r r a y B u f f e r
+ Handle<JSFunction> array_buffer_fun =
+ InstallFunction(global, "ArrayBuffer", JS_ARRAY_BUFFER_TYPE,
+ JSArrayBuffer::kSize,
+ isolate()->initial_object_prototype(),
+ Builtins::kIllegal, true);
+ native_context()->set_array_buffer_fun(*array_buffer_fun);
+ }
+
if (FLAG_harmony_typed_arrays) {
- { // -- A r r a y B u f f e r
- Handle<JSFunction> array_buffer_fun =
- InstallFunction(global, "ArrayBuffer", JS_ARRAY_BUFFER_TYPE,
- JSArrayBuffer::kSize,
- isolate()->initial_object_prototype(),
- Builtins::kIllegal, true);
- native_context()->set_array_buffer_fun(*array_buffer_fun);
- }
- {
- // -- T y p e d A r r a y s
- Handle<JSFunction> int8_fun = InstallTypedArray("Int8Array");
- native_context()->set_int8_array_fun(*int8_fun);
- Handle<JSFunction> uint8_fun = InstallTypedArray("Uint8Array");
- native_context()->set_uint8_array_fun(*uint8_fun);
- Handle<JSFunction> int16_fun = InstallTypedArray("Int16Array");
- native_context()->set_int16_array_fun(*int16_fun);
- Handle<JSFunction> uint16_fun = InstallTypedArray("Uint16Array");
- native_context()->set_uint16_array_fun(*uint16_fun);
- Handle<JSFunction> int32_fun = InstallTypedArray("Int32Array");
- native_context()->set_int32_array_fun(*int32_fun);
- Handle<JSFunction> uint32_fun = InstallTypedArray("Uint32Array");
- native_context()->set_uint32_array_fun(*uint32_fun);
- Handle<JSFunction> float_fun = InstallTypedArray("Float32Array");
- native_context()->set_float_array_fun(*float_fun);
- Handle<JSFunction> double_fun = InstallTypedArray("Float64Array");
- native_context()->set_double_array_fun(*double_fun);
- }
+ // -- T y p e d A r r a y s
+ Handle<JSFunction> int8_fun = InstallTypedArray("Int8Array");
+ native_context()->set_int8_array_fun(*int8_fun);
+ Handle<JSFunction> uint8_fun = InstallTypedArray("Uint8Array");
+ native_context()->set_uint8_array_fun(*uint8_fun);
+ Handle<JSFunction> int16_fun = InstallTypedArray("Int16Array");
+ native_context()->set_int16_array_fun(*int16_fun);
+ Handle<JSFunction> uint16_fun = InstallTypedArray("Uint16Array");
+ native_context()->set_uint16_array_fun(*uint16_fun);
+ Handle<JSFunction> int32_fun = InstallTypedArray("Int32Array");
+ native_context()->set_int32_array_fun(*int32_fun);
+ Handle<JSFunction> uint32_fun = InstallTypedArray("Uint32Array");
+ native_context()->set_uint32_array_fun(*uint32_fun);
+ Handle<JSFunction> float_fun = InstallTypedArray("Float32Array");
+ native_context()->set_float_array_fun(*float_fun);
+ Handle<JSFunction> double_fun = InstallTypedArray("Float64Array");
+ native_context()->set_double_array_fun(*double_fun);
+ Handle<JSFunction> uint8c_fun = InstallTypedArray("Uint8ClampedArray");
+ native_context()->set_uint8c_array_fun(*uint8c_fun);
}
if (FLAG_harmony_generators) {
@@ -1385,6 +1387,40 @@
*generator_object_prototype);
native_context()->set_generator_object_prototype_map(
*generator_object_prototype_map);
+
+ // Create a map for generator result objects.
+ ASSERT(object_map->inobject_properties() == 0);
+ STATIC_ASSERT(JSGeneratorObject::kResultPropertyCount == 2);
+ Handle<Map> generator_result_map = factory()->CopyMap(object_map,
+ JSGeneratorObject::kResultPropertyCount);
+ ASSERT(generator_result_map->inobject_properties() ==
+ JSGeneratorObject::kResultPropertyCount);
+
+ Handle<DescriptorArray> descriptors = factory()->NewDescriptorArray(0,
+ JSGeneratorObject::kResultPropertyCount);
+ DescriptorArray::WhitenessWitness witness(*descriptors);
+ generator_result_map->set_instance_descriptors(*descriptors);
+
+ Handle<String> value_string = factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("value"));
+ FieldDescriptor value_descr(*value_string,
+ JSGeneratorObject::kResultValuePropertyIndex,
+ NONE,
+ Representation::Tagged());
+ generator_result_map->AppendDescriptor(&value_descr, witness);
+
+ Handle<String> done_string = factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("done"));
+ FieldDescriptor done_descr(*done_string,
+ JSGeneratorObject::kResultDonePropertyIndex,
+ NONE,
+ Representation::Tagged());
+ generator_result_map->AppendDescriptor(&done_descr, witness);
+
+ generator_result_map->set_unused_property_fields(0);
+ ASSERT_EQ(JSGeneratorObject::kResultSize,
+ generator_result_map->instance_size());
+ native_context()->set_generator_result_map(*generator_result_map);
}
}
@@ -1990,6 +2026,11 @@
"native object-observe.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
+ if (FLAG_harmony_array_buffer &&
+ strcmp(ExperimentalNatives::GetScriptName(i).start(),
+ "native arraybuffer.js") == 0) {
+ if (!CompileExperimentalBuiltin(isolate(), i)) return false;
+ }
if (FLAG_harmony_typed_arrays &&
strcmp(ExperimentalNatives::GetScriptName(i).start(),
"native typedarray.js") == 0) {
@@ -2368,14 +2409,15 @@
if (from->HasFastProperties()) {
Handle<DescriptorArray> descs =
Handle<DescriptorArray>(from->map()->instance_descriptors());
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
+ for (int i = 0; i < from->map()->NumberOfOwnDescriptors(); i++) {
PropertyDetails details = descs->GetDetails(i);
switch (details.type()) {
case FIELD: {
HandleScope inner(isolate());
Handle<Name> key = Handle<Name>(descs->GetKey(i));
int index = descs->GetFieldIndex(i);
- Handle<Object> value = Handle<Object>(from->FastPropertyAt(index),
+ ASSERT(!descs->GetDetails(i).representation().IsDouble());
+ Handle<Object> value = Handle<Object>(from->RawFastPropertyAt(index),
isolate());
CHECK_NOT_EMPTY_HANDLE(isolate(),
JSObject::SetLocalPropertyIgnoreAttributes(
@@ -2402,10 +2444,8 @@
// Add to dictionary.
Handle<Name> key = Handle<Name>(descs->GetKey(i));
Handle<Object> callbacks(descs->GetCallbacksObject(i), isolate());
- PropertyDetails d = PropertyDetails(details.attributes(),
- CALLBACKS,
- Representation::Tagged(),
- details.descriptor_index());
+ PropertyDetails d = PropertyDetails(
+ details.attributes(), CALLBACKS, i + 1);
JSObject::SetNormalizedProperty(to, key, callbacks, d);
break;
}
diff --git a/src/builtins.cc b/src/builtins.cc
index 149a649..661ee94 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -194,55 +194,58 @@
}
-#define CONVERT_ARG_STUB_CALLER_ARGS(name) \
- Arguments* name = reinterpret_cast<Arguments*>(args[0]);
-
-
RUNTIME_FUNCTION(MaybeObject*, ArrayConstructor_StubFailure) {
- CONVERT_ARG_STUB_CALLER_ARGS(caller_args);
- ASSERT(args.length() == 2);
- Handle<Object> type_info = args.at<Object>(1);
+ // If we get 2 arguments then they are the stub parameters (constructor, type
+ // info). If we get 3, then the first one is a pointer to the arguments
+ // passed by the caller.
+ Arguments empty_args(0, NULL);
+ bool no_caller_args = args.length() == 2;
+ ASSERT(no_caller_args || args.length() == 3);
+ int parameters_start = no_caller_args ? 0 : 1;
+ Arguments* caller_args = no_caller_args
+ ? &empty_args
+ : reinterpret_cast<Arguments*>(args[0]);
+ Handle<JSFunction> constructor = args.at<JSFunction>(parameters_start);
+ Handle<Object> type_info = args.at<Object>(parameters_start + 1);
- JSArray* array = NULL;
bool holey = false;
if (caller_args->length() == 1 && (*caller_args)[0]->IsSmi()) {
int value = Smi::cast((*caller_args)[0])->value();
holey = (value > 0 && value < JSObject::kInitialMaxFastElementArray);
}
+ JSArray* array;
MaybeObject* maybe_array;
- if (*type_info != isolate->heap()->undefined_value()) {
+ if (*type_info != isolate->heap()->undefined_value() &&
+ JSGlobalPropertyCell::cast(*type_info)->value()->IsSmi()) {
JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(*type_info);
- if (cell->value()->IsSmi()) {
- Smi* smi = Smi::cast(cell->value());
- ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
- if (holey && !IsFastHoleyElementsKind(to_kind)) {
- to_kind = GetHoleyElementsKind(to_kind);
- // Update the allocation site info to reflect the advice alteration.
- cell->set_value(Smi::FromInt(to_kind));
- }
+ Smi* smi = Smi::cast(cell->value());
+ ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
+ if (holey && !IsFastHoleyElementsKind(to_kind)) {
+ to_kind = GetHoleyElementsKind(to_kind);
+ // Update the allocation site info to reflect the advice alteration.
+ cell->set_value(Smi::FromInt(to_kind));
+ }
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(to_kind);
- if (mode == TRACK_ALLOCATION_SITE) {
- maybe_array = isolate->heap()->AllocateEmptyJSArrayWithAllocationSite(
- to_kind, type_info);
- } else {
- maybe_array = isolate->heap()->AllocateEmptyJSArray(to_kind);
- }
- if (!maybe_array->To(&array)) return maybe_array;
+ maybe_array = isolate->heap()->AllocateJSObjectWithAllocationSite(
+ *constructor, type_info);
+ if (!maybe_array->To(&array)) return maybe_array;
+ } else {
+ ElementsKind kind = constructor->initial_map()->elements_kind();
+ ASSERT(kind == GetInitialFastElementsKind());
+ maybe_array = isolate->heap()->AllocateJSObject(*constructor);
+ if (!maybe_array->To(&array)) return maybe_array;
+ // We might need to transition to holey
+ if (holey) {
+ kind = GetHoleyElementsKind(kind);
+ maybe_array = array->TransitionElementsKind(kind);
+ if (maybe_array->IsFailure()) return maybe_array;
}
}
- ElementsKind kind = GetInitialFastElementsKind();
- if (holey) {
- kind = GetHoleyElementsKind(kind);
- }
-
- if (array == NULL) {
- maybe_array = isolate->heap()->AllocateEmptyJSArray(kind);
- if (!maybe_array->To(&array)) return maybe_array;
- }
-
+ maybe_array = isolate->heap()->AllocateJSArrayStorage(array, 0, 0,
+ DONT_INITIALIZE_ARRAY_ELEMENTS);
+ if (maybe_array->IsFailure()) return maybe_array;
maybe_array = ArrayConstructInitializeElements(array, caller_args);
if (maybe_array->IsFailure()) return maybe_array;
return array;
@@ -1508,6 +1511,11 @@
KeyedLoadIC::GenerateNonStrictArguments(masm);
}
+static void Generate_StoreIC_Slow(MacroAssembler* masm) {
+ StoreIC::GenerateSlow(masm);
+}
+
+
static void Generate_StoreIC_Initialize(MacroAssembler* masm) {
StoreIC::GenerateInitialize(masm);
}
diff --git a/src/builtins.h b/src/builtins.h
index 8df48a8..6fc17c4 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -124,6 +124,8 @@
Code::kNoExtraICState) \
V(StoreIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
+ V(StoreIC_Slow, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedStoreIC_MissForceGeneric, BUILTIN, UNINITIALIZED, \
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index 04b9a46..31431b7 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -82,6 +82,24 @@
HContext* context() { return context_; }
Isolate* isolate() { return info_.isolate(); }
+ class ArrayContextChecker {
+ public:
+ ArrayContextChecker(HGraphBuilder* builder, HValue* constructor,
+ HValue* array_function)
+ : checker_(builder) {
+ checker_.If<HCompareObjectEqAndBranch, HValue*>(constructor,
+ array_function);
+ checker_.Then();
+ }
+
+ ~ArrayContextChecker() {
+ checker_.ElseDeopt();
+ checker_.End();
+ }
+ private:
+ IfBuilder checker_;
+ };
+
private:
SmartArrayPointer<HParameter*> parameters_;
HValue* arguments_length_;
@@ -240,7 +258,8 @@
GetCodeKind(),
GetICState(),
GetExtraICState(),
- GetStubType(), -1);
+ GetStubType(),
+ GetStubFlags());
Handle<Code> new_object = factory->NewCode(
desc, flags, masm.CodeObject(), NeedsImmovableCode());
return new_object;
@@ -290,8 +309,7 @@
checker.Then();
if (mode == FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS) {
- HValue* elements =
- AddInstruction(new(zone) HLoadElements(boilerplate, NULL));
+ HValue* elements = AddLoadElements(boilerplate);
IfBuilder if_fixed_cow(this);
if_fixed_cow.IfCompareMap(elements, factory->fixed_cow_array_map());
@@ -410,6 +428,36 @@
}
+template<>
+HValue* CodeStubGraphBuilder<LoadFieldStub>::BuildCodeStub() {
+ Representation representation = casted_stub()->representation();
+ HInstruction* load = AddInstruction(DoBuildLoadNamedField(
+ GetParameter(0), casted_stub()->is_inobject(),
+ representation, casted_stub()->offset()));
+ return load;
+}
+
+
+Handle<Code> LoadFieldStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
+template<>
+HValue* CodeStubGraphBuilder<KeyedLoadFieldStub>::BuildCodeStub() {
+ Representation representation = casted_stub()->representation();
+ HInstruction* load = AddInstruction(DoBuildLoadNamedField(
+ GetParameter(0), casted_stub()->is_inobject(),
+ representation, casted_stub()->offset()));
+ return load;
+}
+
+
+Handle<Code> KeyedLoadFieldStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
template <>
HValue* CodeStubGraphBuilder<KeyedStoreFastElementStub>::BuildCodeStub() {
BuildUncheckedMonomorphicElementAccess(
@@ -453,8 +501,7 @@
if_builder.Else();
- HInstruction* elements =
- AddInstruction(new(zone) HLoadElements(js_array, js_array));
+ HInstruction* elements = AddLoadElements(js_array);
HInstruction* elements_length =
AddInstruction(new(zone) HFixedArrayBaseLength(elements));
@@ -495,6 +542,10 @@
// -- Parameter 1 : type info cell
// -- Parameter 0 : constructor
// -----------------------------------
+ HInstruction* array_function = BuildGetArrayFunction(context());
+ ArrayContextChecker(this,
+ GetParameter(ArrayConstructorStubBase::kConstructor),
+ array_function);
// Get the right map
// Should be a constant
JSArrayBuilder array_builder(
@@ -514,6 +565,10 @@
template <>
HValue* CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::
BuildCodeStub() {
+ HInstruction* array_function = BuildGetArrayFunction(context());
+ ArrayContextChecker(this,
+ GetParameter(ArrayConstructorStubBase::kConstructor),
+ array_function);
// Smi check and range check on the input arg.
HValue* constant_one = graph()->GetConstant1();
HValue* constant_zero = graph()->GetConstant0();
@@ -567,6 +622,10 @@
template <>
HValue* CodeStubGraphBuilder<ArrayNArgumentsConstructorStub>::BuildCodeStub() {
+ HInstruction* array_function = BuildGetArrayFunction(context());
+ ArrayContextChecker(this,
+ GetParameter(ArrayConstructorStubBase::kConstructor),
+ array_function);
ElementsKind kind = casted_stub()->elements_kind();
HValue* length = GetArgumentsLength();
diff --git a/src/code-stubs.h b/src/code-stubs.h
index ea895d6..646aee2 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -87,7 +87,8 @@
V(ArrayConstructor) \
V(ProfileEntryHook) \
/* IC Handler stubs */ \
- V(LoadField)
+ V(LoadField) \
+ V(KeyedLoadField)
// List of code stubs only used on ARM platforms.
#ifdef V8_TARGET_ARCH_ARM
@@ -185,6 +186,12 @@
virtual Code::ExtraICState GetExtraICState() {
return Code::kNoExtraICState;
}
+ virtual Code::StubType GetStubType() {
+ return Code::NORMAL;
+ }
+ virtual int GetStubFlags() {
+ return -1;
+ }
protected:
static bool CanUseFPRegisters();
@@ -192,9 +199,6 @@
// Generates the assembler code for the stub.
virtual Handle<Code> GenerateCode() = 0;
- virtual Code::StubType GetStubType() {
- return Code::NORMAL;
- }
// Returns whether the code generated for this stub needs to be allocated as
// a fixed (non-moveable) code object.
@@ -253,7 +257,6 @@
virtual Handle<Code> GenerateCode();
virtual Code::Kind GetCodeKind() const { return Code::STUB; }
- virtual int GetStubFlags() { return -1; }
protected:
// Generates the assembler code for the stub.
@@ -754,42 +757,108 @@
};
-class HandlerStub: public ICStub {
+class HICStub: public HydrogenCodeStub {
public:
- explicit HandlerStub(Code::Kind kind) : ICStub(kind) { }
+ virtual Code::Kind GetCodeKind() const { return kind(); }
+ virtual InlineCacheState GetICState() { return MONOMORPHIC; }
+
+ protected:
+ HICStub() : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) { }
+ class KindBits: public BitField<Code::Kind, 0, 4> {};
+ virtual Code::Kind kind() const = 0;
+};
+
+
+class HandlerStub: public HICStub {
+ public:
virtual Code::Kind GetCodeKind() const { return Code::STUB; }
virtual int GetStubFlags() { return kind(); }
+
+ protected:
+ HandlerStub() : HICStub() { }
};
class LoadFieldStub: public HandlerStub {
public:
- LoadFieldStub(Register reg, bool inobject, int index)
- : HandlerStub(Code::LOAD_IC),
- reg_(reg),
- inobject_(inobject),
- index_(index) { }
- virtual void Generate(MacroAssembler* masm);
+ LoadFieldStub(bool inobject, int index, Representation representation)
+ : HandlerStub() {
+ Initialize(Code::LOAD_IC, inobject, index, representation);
+ }
+
+ virtual Handle<Code> GenerateCode();
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ Representation representation() {
+ if (unboxed_double()) return Representation::Double();
+ return Representation::Tagged();
+ }
+
+ virtual Code::Kind kind() const {
+ return KindBits::decode(bit_field_);
+ }
+
+ bool is_inobject() {
+ return InobjectBits::decode(bit_field_);
+ }
+
+ int offset() {
+ int index = IndexBits::decode(bit_field_);
+ int offset = index * kPointerSize;
+ if (is_inobject()) return offset;
+ return FixedArray::kHeaderSize + offset;
+ }
+
+ bool unboxed_double() {
+ return UnboxedDoubleBits::decode(bit_field_);
+ }
+
+ virtual Code::StubType GetStubType() { return Code::FIELD; }
protected:
- virtual Code::StubType GetStubType() { return Code::FIELD; }
+ LoadFieldStub() : HandlerStub() { }
+
+ void Initialize(Code::Kind kind,
+ bool inobject,
+ int index,
+ Representation representation) {
+ bool unboxed_double = FLAG_track_double_fields && representation.IsDouble();
+ bit_field_ = KindBits::encode(kind)
+ | InobjectBits::encode(inobject)
+ | IndexBits::encode(index)
+ | UnboxedDoubleBits::encode(unboxed_double);
+ }
private:
STATIC_ASSERT(KindBits::kSize == 4);
- class RegisterBits: public BitField<int, 4, 6> {};
- class InobjectBits: public BitField<bool, 10, 1> {};
- class IndexBits: public BitField<int, 11, 11> {};
+ class InobjectBits: public BitField<bool, 4, 1> {};
+ class IndexBits: public BitField<int, 5, 11> {};
+ class UnboxedDoubleBits: public BitField<bool, 16, 1> {};
virtual CodeStub::Major MajorKey() { return LoadField; }
- virtual int MinorKey() {
- return KindBits::encode(kind())
- | RegisterBits::encode(reg_.code())
- | InobjectBits::encode(inobject_)
- | IndexBits::encode(index_);
+ virtual int NotMissMinorKey() { return bit_field_; }
+
+ int bit_field_;
+};
+
+
+class KeyedLoadFieldStub: public LoadFieldStub {
+ public:
+ KeyedLoadFieldStub(bool inobject, int index, Representation representation)
+ : LoadFieldStub() {
+ Initialize(Code::KEYED_LOAD_IC, inobject, index, representation);
}
- Register reg_;
- bool inobject_;
- int index_;
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ virtual Handle<Code> GenerateCode();
+
+ private:
+ virtual CodeStub::Major MajorKey() { return KeyedLoadField; }
};
@@ -1596,7 +1665,8 @@
static void InstallDescriptors(Isolate* isolate);
// Parameters accessed via CodeStubGraphBuilder::GetParameter()
- static const int kPropertyCell = 0;
+ static const int kConstructor = 0;
+ static const int kPropertyCell = 1;
private:
int NotMissMinorKey() { return bit_field_; }
diff --git a/src/compiler.cc b/src/compiler.cc
index dce8171..b7ff92a 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -144,7 +144,8 @@
return Code::ComputeFlags(code_stub()->GetCodeKind(),
code_stub()->GetICState(),
code_stub()->GetExtraICState(),
- Code::NORMAL, -1);
+ code_stub()->GetStubType(),
+ code_stub()->GetStubFlags());
} else {
return Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
}
@@ -299,14 +300,14 @@
//
// The encoding is as a signed value, with parameters and receiver using
// the negative indices and locals the non-negative ones.
- const int parameter_limit = -LUnallocated::kMinFixedIndex;
+ const int parameter_limit = -LUnallocated::kMinFixedSlotIndex;
Scope* scope = info()->scope();
if ((scope->num_parameters() + 1) > parameter_limit) {
info()->set_bailout_reason("too many parameters");
return AbortOptimization();
}
- const int locals_limit = LUnallocated::kMaxFixedIndex;
+ const int locals_limit = LUnallocated::kMaxFixedSlotIndex;
if (!info()->osr_ast_id().IsNone() &&
scope->num_parameters() + 1 + scope->num_stack_slots() > locals_limit) {
info()->set_bailout_reason("too many parameters/locals");
diff --git a/src/contexts.h b/src/contexts.h
index 2672487..434b274 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -132,6 +132,7 @@
V(INT32_ARRAY_FUN_INDEX, JSFunction, int32_array_fun) \
V(FLOAT_ARRAY_FUN_INDEX, JSFunction, float_array_fun) \
V(DOUBLE_ARRAY_FUN_INDEX, JSFunction, double_array_fun) \
+ V(UINT8C_ARRAY_FUN_INDEX, JSFunction, uint8c_array_fun) \
V(FUNCTION_MAP_INDEX, Map, function_map) \
V(STRICT_MODE_FUNCTION_MAP_INDEX, Map, strict_mode_function_map) \
V(FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, function_without_prototype_map) \
@@ -179,6 +180,7 @@
strict_mode_generator_function_map) \
V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, \
generator_object_prototype_map) \
+ V(GENERATOR_RESULT_MAP_INDEX, Map, generator_result_map) \
V(RANDOM_SEED_INDEX, ByteArray, random_seed)
// JSFunctions are pairs (context, function code), sometimes also called
@@ -294,6 +296,7 @@
INT32_ARRAY_FUN_INDEX,
FLOAT_ARRAY_FUN_INDEX,
DOUBLE_ARRAY_FUN_INDEX,
+ UINT8C_ARRAY_FUN_INDEX,
MESSAGE_LISTENERS_INDEX,
MAKE_MESSAGE_FUN_INDEX,
GET_STACK_TRACE_LINE_INDEX,
@@ -321,6 +324,7 @@
GENERATOR_FUNCTION_MAP_INDEX,
STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX,
GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX,
+ GENERATOR_RESULT_MAP_INDEX,
RANDOM_SEED_INDEX,
// Properties from here are treated as weak references by the full GC.
diff --git a/src/d8-debug.cc b/src/d8-debug.cc
index a20de43..e1d29d9 100644
--- a/src/d8-debug.cc
+++ b/src/d8-debug.cc
@@ -77,7 +77,7 @@
// Print the event details.
Handle<Object> details =
- Shell::DebugMessageDetails(Handle<String>::Cast(event_json));
+ Shell::DebugMessageDetails(isolate, Handle<String>::Cast(event_json));
if (try_catch.HasCaught()) {
Shell::ReportException(isolate, &try_catch);
return;
@@ -114,7 +114,7 @@
// Convert the debugger command to a JSON debugger request.
Handle<Value> request =
- Shell::DebugCommandToJSONRequest(String::New(command));
+ Shell::DebugCommandToJSONRequest(isolate, String::New(command));
if (try_catch.HasCaught()) {
Shell::ReportException(isolate, &try_catch);
continue;
@@ -146,7 +146,8 @@
Handle<String> response = Handle<String>::Cast(response_val);
// Convert the debugger response into text details and the running state.
- Handle<Object> response_details = Shell::DebugMessageDetails(response);
+ Handle<Object> response_details =
+ Shell::DebugMessageDetails(isolate, response);
if (try_catch.HasCaught()) {
Shell::ReportException(isolate, &try_catch);
continue;
@@ -281,7 +282,8 @@
// Print the event details.
TryCatch try_catch;
Handle<Object> details =
- Shell::DebugMessageDetails(Handle<String>::Cast(String::New(message)));
+ Shell::DebugMessageDetails(isolate_,
+ Handle<String>::Cast(String::New(message)));
if (try_catch.HasCaught()) {
Shell::ReportException(isolate_, &try_catch);
PrintPrompt();
@@ -310,7 +312,7 @@
// Convert the debugger command to a JSON debugger request.
TryCatch try_catch;
Handle<Value> request =
- Shell::DebugCommandToJSONRequest(String::New(command));
+ Shell::DebugCommandToJSONRequest(isolate_, String::New(command));
if (try_catch.HasCaught()) {
Shell::ReportException(isolate_, &try_catch);
PrintPrompt();
diff --git a/src/d8.cc b/src/d8.cc
index 39a64eb..1889556 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -40,6 +40,11 @@
#include <string.h>
#include <sys/stat.h>
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+
#ifdef V8_SHARED
#include <assert.h>
#endif // V8_SHARED
@@ -124,8 +129,8 @@
}
#define DEFINE_STRING_GETTER(name, value) \
- static Persistent<String> name##_string(Isolate* isolate) { \
- return Get(isolate)->name##_string_; \
+ static Handle<String> name##_string(Isolate* isolate) { \
+ return Handle<String>(*Get(isolate)->name##_string_); \
}
FOR_EACH_STRING(DEFINE_STRING_GETTER)
#undef DEFINE_STRING_GETTER
@@ -245,7 +250,7 @@
} else {
PerIsolateData* data = PerIsolateData::Get(isolate);
Local<Context> realm =
- Local<Context>::New(data->realms_[data->realm_current_]);
+ Local<Context>::New(isolate, data->realms_[data->realm_current_]);
realm->Enter();
Handle<Value> result = script->Run();
realm->Exit();
@@ -272,7 +277,7 @@
#if !defined(V8_SHARED)
} else {
v8::TryCatch try_catch;
- Context::Scope context_scope(utility_context_);
+ Context::Scope context_scope(isolate, utility_context_);
Handle<Object> global = utility_context_->Global();
Handle<Value> fun = global->Get(String::New("Stringify"));
Handle<Value> argv[1] = { result };
@@ -421,7 +426,7 @@
}
Handle<Script> script = Script::New(args[1]->ToString());
if (script.IsEmpty()) return Undefined(isolate);
- Local<Context> realm = Local<Context>::New(data->realms_[index]);
+ Local<Context> realm = Local<Context>::New(isolate, data->realms_[index]);
realm->Enter();
Handle<Value> result = script->Run();
realm->Exit();
@@ -435,7 +440,7 @@
Isolate* isolate = info.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (data->realm_shared_.IsEmpty()) return Undefined(isolate);
- return data->realm_shared_;
+ return Local<Value>::New(isolate, data->realm_shared_);
}
void Shell::RealmSharedSet(Local<String> property,
@@ -1057,14 +1062,14 @@
void Shell::ExternalArrayWeakCallback(v8::Isolate* isolate,
- Persistent<Value> object,
- void* data) {
+ Persistent<Object>* object,
+ uint8_t* data) {
HandleScope scope(isolate);
- int32_t length = object->ToObject()->Get(
+ int32_t length = (*object)->Get(
PerIsolateData::byteLength_string(isolate))->Uint32Value();
isolate->AdjustAmountOfExternalAllocatedMemory(-length);
- delete[] static_cast<uint8_t*>(data);
- object.Dispose(isolate);
+ delete[] data;
+ object->Dispose(isolate);
}
@@ -1180,7 +1185,7 @@
Handle<String> text,
Handle<String> full) {
HandleScope handle_scope(isolate);
- Context::Scope context_scope(utility_context_);
+ Context::Scope context_scope(isolate, utility_context_);
Handle<Object> global = utility_context_->Global();
Handle<Value> fun = global->Get(String::New("GetCompletions"));
static const int kArgc = 3;
@@ -1191,8 +1196,10 @@
#ifdef ENABLE_DEBUGGER_SUPPORT
-Handle<Object> Shell::DebugMessageDetails(Handle<String> message) {
- Context::Scope context_scope(utility_context_);
+Handle<Object> Shell::DebugMessageDetails(Isolate* isolate,
+ Handle<String> message) {
+ HandleScope handle_scope(isolate);
+ Context::Scope context_scope(isolate, utility_context_);
Handle<Object> global = utility_context_->Global();
Handle<Value> fun = global->Get(String::New("DebugMessageDetails"));
static const int kArgc = 1;
@@ -1202,8 +1209,10 @@
}
-Handle<Value> Shell::DebugCommandToJSONRequest(Handle<String> command) {
- Context::Scope context_scope(utility_context_);
+Handle<Value> Shell::DebugCommandToJSONRequest(Isolate* isolate,
+ Handle<String> command) {
+ HandleScope handle_scope(isolate);
+ Context::Scope context_scope(isolate, utility_context_);
Handle<Object> global = utility_context_->Global();
Handle<Value> fun = global->Get(String::New("DebugCommandToJSONRequest"));
static const int kArgc = 1;
@@ -1214,7 +1223,9 @@
void Shell::DispatchDebugMessages() {
- v8::Context::Scope scope(Shell::evaluation_context_);
+ Isolate* isolate = v8::Isolate::GetCurrent();
+ HandleScope handle_scope(isolate);
+ v8::Context::Scope scope(isolate, Shell::evaluation_context_);
v8::Debug::ProcessDebugMessages();
}
#endif // ENABLE_DEBUGGER_SUPPORT
@@ -1327,7 +1338,7 @@
// utility, evaluation and debug context can all access each other.
utility_context_->SetSecurityToken(Undefined(isolate));
evaluation_context_->SetSecurityToken(Undefined(isolate));
- Context::Scope utility_scope(utility_context_);
+ Context::Scope utility_scope(isolate, utility_context_);
#ifdef ENABLE_DEBUGGER_SUPPORT
if (i::FLAG_debugger) printf("JavaScript debugger enabled\n");
@@ -1528,7 +1539,8 @@
Locker lock(isolate);
HandleScope scope(isolate);
Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
- utility_context_ = Context::New(NULL, global_template);
+ utility_context_.Reset(isolate,
+ Context::New(isolate, NULL, global_template));
#ifdef ENABLE_DEBUGGER_SUPPORT
// Start the debugger agent if requested.
@@ -1541,14 +1553,15 @@
}
-Persistent<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
+Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
#ifndef V8_SHARED
// This needs to be a critical section since this is not thread-safe
i::ScopedLock lock(context_mutex_);
#endif // V8_SHARED
// Initialize the global objects
Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
- Persistent<Context> context = Context::New(NULL, global_template);
+ HandleScope handle_scope(isolate);
+ Local<Context> context = Context::New(isolate, NULL, global_template);
ASSERT(!context.IsEmpty());
Context::Scope scope(context);
@@ -1566,7 +1579,7 @@
context->Global()->Set(String::New("arguments"),
Utils::ToLocal(arguments_jsarray));
#endif // V8_SHARED
- return context;
+ return handle_scope.Close(context);
}
@@ -1730,7 +1743,7 @@
static char* ReadWord(char* data) {
return ReadToken(data, ' ');
}
-#endif // trueV8_SHARED
+#endif // V8_SHARED
// Reads a file into a v8 string.
@@ -1746,9 +1759,9 @@
void Shell::RunShell(Isolate* isolate) {
Locker locker(isolate);
- Context::Scope context_scope(evaluation_context_);
- PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
HandleScope outer_scope(isolate);
+ Context::Scope context_scope(isolate, evaluation_context_);
+ PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
Handle<String> name = String::New("(d8)");
LineEditor* console = LineEditor::Get();
printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name());
@@ -1797,7 +1810,7 @@
// Prepare the context for this thread.
Locker locker(isolate_);
HandleScope outer_scope(isolate_);
- Persistent<Context> thread_context =
+ Local<Context> thread_context =
Shell::CreateEvaluationContext(isolate_);
Context::Scope context_scope(thread_context);
PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate_));
@@ -1821,7 +1834,6 @@
Shell::ExecuteString(isolate_, str, String::New(filename), false, false);
}
- thread_context.Dispose(thread_context->GetIsolate());
ptr = next_line;
}
}
@@ -1898,15 +1910,16 @@
{
Isolate::Scope iscope(isolate);
Locker lock(isolate);
- HandleScope scope(isolate);
- PerIsolateData data(isolate);
- Persistent<Context> context = Shell::CreateEvaluationContext(isolate);
{
- Context::Scope cscope(context);
- PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
- Execute(isolate);
+ HandleScope scope(isolate);
+ PerIsolateData data(isolate);
+ Local<Context> context = Shell::CreateEvaluationContext(isolate);
+ {
+ Context::Scope cscope(context);
+ PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
+ Execute(isolate);
+ }
}
- context.Dispose(isolate);
if (Shell::options.send_idle_notification) {
const int kLongIdlePauseInMs = 1000;
V8::ContextDisposedNotification();
@@ -2097,26 +2110,27 @@
#endif // V8_SHARED
{ // NOLINT
Locker lock(isolate);
- HandleScope scope(isolate);
- Persistent<Context> context = CreateEvaluationContext(isolate);
- if (options.last_run) {
- // Keep using the same context in the interactive shell.
- evaluation_context_ = context;
-#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
- // If the interactive debugger is enabled make sure to activate
- // it before running the files passed on the command line.
- if (i::FLAG_debugger) {
- InstallUtilityScript(isolate);
- }
-#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
- }
{
- Context::Scope cscope(context);
- PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
- options.isolate_sources[0].Execute(isolate);
+ HandleScope scope(isolate);
+ Local<Context> context = CreateEvaluationContext(isolate);
+ if (options.last_run) {
+ // Keep using the same context in the interactive shell.
+ evaluation_context_.Reset(isolate, context);
+#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
+ // If the interactive debugger is enabled make sure to activate
+ // it before running the files passed on the command line.
+ if (i::FLAG_debugger) {
+ InstallUtilityScript(isolate);
+ }
+#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
+ }
+ {
+ Context::Scope cscope(context);
+ PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
+ options.isolate_sources[0].Execute(isolate);
+ }
}
if (!options.last_run) {
- context.Dispose(isolate);
if (options.send_idle_notification) {
const int kLongIdlePauseInMs = 1000;
V8::ContextDisposedNotification();
@@ -2161,7 +2175,7 @@
{
Initialize(isolate);
#ifdef ENABLE_VTUNE_JIT_INTERFACE
- vTune::InitilizeVtuneForV8();
+ vTune::InitializeVtuneForV8();
#endif
PerIsolateData data(isolate);
InitializeDebugger(isolate);
diff --git a/src/d8.h b/src/d8.h
index 4d9504f..c068dd9 100644
--- a/src/d8.h
+++ b/src/d8.h
@@ -273,7 +273,7 @@
static const char* ToCString(const v8::String::Utf8Value& value);
static void ReportException(Isolate* isolate, TryCatch* try_catch);
static Handle<String> ReadFile(Isolate* isolate, const char* name);
- static Persistent<Context> CreateEvaluationContext(Isolate* isolate);
+ static Local<Context> CreateEvaluationContext(Isolate* isolate);
static int RunMain(Isolate* isolate, int argc, char* argv[]);
static int Main(int argc, char* argv[]);
static void Exit(int exit_code);
@@ -292,8 +292,10 @@
static void MapCounters(const char* name);
#ifdef ENABLE_DEBUGGER_SUPPORT
- static Handle<Object> DebugMessageDetails(Handle<String> message);
- static Handle<Value> DebugCommandToJSONRequest(Handle<String> command);
+ static Handle<Object> DebugMessageDetails(Isolate* isolate,
+ Handle<String> message);
+ static Handle<Value> DebugCommandToJSONRequest(Isolate* isolate,
+ Handle<String> command);
static void DispatchDebugMessages();
#endif // ENABLE_DEBUGGER_SUPPORT
#endif // V8_SHARED
@@ -414,8 +416,8 @@
ExternalArrayType type,
int32_t element_size);
static void ExternalArrayWeakCallback(Isolate* isolate,
- Persistent<Value> object,
- void* data);
+ Persistent<Object>* object,
+ uint8_t* data);
};
diff --git a/src/debug.cc b/src/debug.cc
index 8e1cf43..02ec124 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -76,12 +76,12 @@
static void PrintLn(v8::Local<v8::Value> value) {
v8::Local<v8::String> s = value->ToString();
- ScopedVector<char> data(s->Length() + 1);
+ ScopedVector<char> data(s->Utf8Length() + 1);
if (data.start() == NULL) {
V8::FatalProcessOutOfMemory("PrintLn");
return;
}
- s->WriteAscii(data.start());
+ s->WriteUtf8(data.start());
PrintF("%s\n", data.start());
}
diff --git a/src/factory.cc b/src/factory.cc
index 8dfab18..fe71a22 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -660,6 +660,14 @@
}
+Handle<HeapNumber> Factory::NewHeapNumber(double value,
+ PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateHeapNumber(value, pretenure), HeapNumber);
+}
+
+
Handle<JSObject> Factory::NewNeanderObject() {
CALL_HEAP_FUNCTION(
isolate(),
@@ -1092,6 +1100,10 @@
typed_array_fun = native_context->double_array_fun();
break;
+ case kExternalPixelArray:
+ typed_array_fun = native_context->uint8c_array_fun();
+ break;
+
default:
UNREACHABLE();
return Handle<JSTypedArray>();
diff --git a/src/factory.h b/src/factory.h
index ca6ad41..5e89708 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -267,6 +267,9 @@
Handle<Object> NewNumberFromUint(uint32_t value,
PretenureFlag pretenure = NOT_TENURED);
+ Handle<HeapNumber> NewHeapNumber(double value,
+ PretenureFlag pretenure = NOT_TENURED);
+
// These objects are used by the api to create env-independent data
// structures in the heap.
Handle<JSObject> NewNeanderObject();
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 1e454ff..d5d58a7 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -166,6 +166,9 @@
"enable harmony object observation (implies harmony collections")
DEFINE_bool(harmony_typed_arrays, false,
"enable harmony typed arrays")
+DEFINE_bool(harmony_array_buffer, false,
+ "enable harmony array buffer")
+DEFINE_implication(harmony_typed_arrays, harmony_array_buffer)
DEFINE_bool(harmony_generators, false, "enable harmony generators")
DEFINE_bool(harmony, false, "enable all harmony features (except typeof)")
DEFINE_implication(harmony, harmony_scoping)
@@ -228,6 +231,8 @@
DEFINE_bool(trace_representation, false, "trace representation types")
DEFINE_bool(trace_track_allocation_sites, false,
"trace the tracking of allocation sites")
+DEFINE_bool(trace_migration, false, "trace object migration")
+DEFINE_bool(trace_generalization, false, "trace map generalization")
DEFINE_bool(stress_pointer_maps, false, "pointer map for every instruction")
DEFINE_bool(stress_environments, false, "environment for every instruction")
DEFINE_int(deopt_every_n_times,
diff --git a/src/frames-inl.h b/src/frames-inl.h
index 7901a5e..1228ccf 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -98,6 +98,12 @@
}
+inline unsigned StackHandler::index() const {
+ const int offset = StackHandlerConstants::kStateOffset;
+ return IndexField::decode(Memory::unsigned_at(address() + offset));
+}
+
+
inline Object** StackHandler::context_address() const {
const int offset = StackHandlerConstants::kContextOffset;
return reinterpret_cast<Object**>(address() + offset);
@@ -216,8 +222,9 @@
inline Address JavaScriptFrame::GetOperandSlot(int index) const {
Address base = fp() + JavaScriptFrameConstants::kLocal0Offset;
ASSERT(IsAddressAligned(base, kPointerSize));
- ASSERT(type() == JAVA_SCRIPT);
- ASSERT(index < ComputeOperandsCount());
+ ASSERT_EQ(type(), JAVA_SCRIPT);
+ ASSERT_LT(index, ComputeOperandsCount());
+ ASSERT_LE(0, index);
// Operand stack grows down.
return base - index * kPointerSize;
}
diff --git a/src/frames.cc b/src/frames.cc
index aaf8c79..152cd30 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -840,6 +840,72 @@
}
+void JavaScriptFrame::SaveOperandStack(FixedArray* store,
+ int* stack_handler_index) const {
+ int operands_count = store->length();
+ ASSERT_LE(operands_count, ComputeOperandsCount());
+
+ // Visit the stack in LIFO order, saving operands and stack handlers into the
+ // array. The saved stack handlers store a link to the next stack handler,
+ // which will allow RestoreOperandStack to rewind the handlers.
+ StackHandlerIterator it(this, top_handler());
+ int i = operands_count - 1;
+ *stack_handler_index = -1;
+ for (; !it.done(); it.Advance()) {
+ StackHandler* handler = it.handler();
+ // Save operands pushed after the handler was pushed.
+ for (; GetOperandSlot(i) < handler->address(); i--) {
+ store->set(i, GetOperand(i));
+ }
+ ASSERT_GE(i + 1, StackHandlerConstants::kSlotCount);
+ ASSERT_EQ(handler->address(), GetOperandSlot(i));
+ int next_stack_handler_index = i + 1 - StackHandlerConstants::kSlotCount;
+ handler->Unwind(isolate(), store, next_stack_handler_index,
+ *stack_handler_index);
+ *stack_handler_index = next_stack_handler_index;
+ i -= StackHandlerConstants::kSlotCount;
+ }
+
+ // Save any remaining operands.
+ for (; i >= 0; i--) {
+ store->set(i, GetOperand(i));
+ }
+}
+
+
+void JavaScriptFrame::RestoreOperandStack(FixedArray* store,
+ int stack_handler_index) {
+ int operands_count = store->length();
+ ASSERT_LE(operands_count, ComputeOperandsCount());
+ int i = 0;
+ while (i <= stack_handler_index) {
+ if (i < stack_handler_index) {
+ // An operand.
+ ASSERT_EQ(GetOperand(i), isolate()->heap()->the_hole_value());
+ Memory::Object_at(GetOperandSlot(i)) = store->get(i);
+ i++;
+ } else {
+ // A stack handler.
+ ASSERT_EQ(i, stack_handler_index);
+ // The FixedArray store grows up. The stack grows down. So the operand
+ // slot for i actually points to the bottom of the top word in the
+ // handler. The base of the StackHandler* is the address of the bottom
+ // word, which will be the last slot that is in the handler.
+ int handler_slot_index = i + StackHandlerConstants::kSlotCount - 1;
+ StackHandler *handler =
+ StackHandler::FromAddress(GetOperandSlot(handler_slot_index));
+ stack_handler_index = handler->Rewind(isolate(), store, i, fp());
+ i += StackHandlerConstants::kSlotCount;
+ }
+ }
+
+ for (; i < operands_count; i++) {
+ ASSERT_EQ(GetOperand(i), isolate()->heap()->the_hole_value());
+ Memory::Object_at(GetOperandSlot(i)) = store->get(i);
+ }
+}
+
+
void FrameSummary::Print() {
PrintF("receiver: ");
receiver_->ShortPrint();
@@ -1438,6 +1504,60 @@
// -------------------------------------------------------------------------
+
+void StackHandler::Unwind(Isolate* isolate,
+ FixedArray* array,
+ int offset,
+ int previous_handler_offset) const {
+ STATIC_ASSERT(StackHandlerConstants::kSlotCount == 5);
+ ASSERT_LE(0, offset);
+ ASSERT_GE(array->length(), offset + 5);
+ // Unwinding a stack handler into an array chains it in the opposite
+ // direction, re-using the "next" slot as a "previous" link, so that stack
+ // handlers can be later re-wound in the correct order. Decode the "state"
+ // slot into "index" and "kind" and store them separately, using the fp slot.
+ array->set(offset, Smi::FromInt(previous_handler_offset)); // next
+ array->set(offset + 1, *code_address()); // code
+ array->set(offset + 2, Smi::FromInt(static_cast<int>(index()))); // state
+ array->set(offset + 3, *context_address()); // context
+ array->set(offset + 4, Smi::FromInt(static_cast<int>(kind()))); // fp
+
+ *isolate->handler_address() = next()->address();
+}
+
+
+int StackHandler::Rewind(Isolate* isolate,
+ FixedArray* array,
+ int offset,
+ Address fp) {
+ STATIC_ASSERT(StackHandlerConstants::kSlotCount == 5);
+ ASSERT_LE(0, offset);
+ ASSERT_GE(array->length(), offset + 5);
+ Smi* prev_handler_offset = Smi::cast(array->get(offset));
+ Code* code = Code::cast(array->get(offset + 1));
+ Smi* smi_index = Smi::cast(array->get(offset + 2));
+ Object* context = array->get(offset + 3);
+ Smi* smi_kind = Smi::cast(array->get(offset + 4));
+
+ unsigned state = KindField::encode(static_cast<Kind>(smi_kind->value())) |
+ IndexField::encode(static_cast<unsigned>(smi_index->value()));
+
+ Memory::Address_at(address() + StackHandlerConstants::kNextOffset) =
+ *isolate->handler_address();
+ Memory::Object_at(address() + StackHandlerConstants::kCodeOffset) = code;
+ Memory::uintptr_at(address() + StackHandlerConstants::kStateOffset) = state;
+ Memory::Object_at(address() + StackHandlerConstants::kContextOffset) =
+ context;
+ Memory::Address_at(address() + StackHandlerConstants::kFPOffset) = fp;
+
+ *isolate->handler_address() = address();
+
+ return prev_handler_offset->value();
+}
+
+
+// -------------------------------------------------------------------------
+
int NumRegs(RegList reglist) {
return CompilerIntrinsics::CountSetBits(reglist);
}
diff --git a/src/frames.h b/src/frames.h
index 678191b..3c44f5e 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -93,6 +93,7 @@
static const int kFPOffset = 4 * kPointerSize;
static const int kSize = kFPOffset + kPointerSize;
+ static const int kSlotCount = kSize >> kPointerSizeLog2;
};
@@ -131,9 +132,15 @@
inline bool is_catch() const;
inline bool is_finally() const;
+ // Generator support to preserve stack handlers.
+ void Unwind(Isolate* isolate, FixedArray* array, int offset,
+ int previous_handler_offset) const;
+ int Rewind(Isolate* isolate, FixedArray* array, int offset, Address fp);
+
private:
// Accessors.
inline Kind kind() const;
+ inline unsigned index() const;
inline Object** context_address() const;
inline Object** code_address() const;
@@ -541,6 +548,10 @@
inline Object* GetOperand(int index) const;
inline int ComputeOperandsCount() const;
+ // Generator support to preserve operand stack and stack handlers.
+ void SaveOperandStack(FixedArray* store, int* stack_handler_index) const;
+ void RestoreOperandStack(FixedArray* store, int stack_handler_index);
+
// Debugger access.
void SetParameterValue(int index, Object* value) const;
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 3734ae5..32242b2 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -410,6 +410,11 @@
// this has to be a separate pass _before_ populating or executing any module.
void AllocateModules(ZoneList<Declaration*>* declarations);
+ // Generator code to return a fresh iterator result object. The "value"
+ // property is set to a value popped from the stack, and "done" is set
+ // according to the argument.
+ void EmitReturnIteratorResult(bool done);
+
// Try to perform a comparison as a fast inlined literal compare if
// the operands allow it. Returns true if the compare operations
// has been matched and all code generated; false otherwise.
diff --git a/src/global-handles.cc b/src/global-handles.cc
index 7ee89d7..29432bb 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -25,6 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+
#include "v8.h"
#include "api.h"
@@ -232,7 +235,7 @@
void MakeWeak(GlobalHandles* global_handles,
void* parameter,
- WeakReferenceCallback weak_reference_callback,
+ RevivableCallback weak_reference_callback,
NearDeathCallback near_death_callback) {
ASSERT(state() != FREE);
set_state(WEAK);
@@ -264,7 +267,7 @@
set_state(NEAR_DEATH);
set_parameter(NULL);
- v8::Persistent<v8::Object> object = ToApi<v8::Object>(handle());
+ v8::Persistent<v8::Value> object = ToApi<v8::Value>(handle());
{
// Check that we are not passing a finalized external string to
// the callback.
@@ -276,9 +279,11 @@
VMState<EXTERNAL> state(isolate);
if (near_death_callback_ != NULL) {
if (IsWeakCallback::decode(flags_)) {
- WeakReferenceCallback callback =
- reinterpret_cast<WeakReferenceCallback>(near_death_callback_);
- callback(object, par);
+ RevivableCallback callback =
+ reinterpret_cast<RevivableCallback>(near_death_callback_);
+ callback(reinterpret_cast<v8::Isolate*>(isolate),
+ &object,
+ par);
} else {
near_death_callback_(reinterpret_cast<v8::Isolate*>(isolate),
object,
@@ -490,9 +495,9 @@
void GlobalHandles::MakeWeak(Object** location,
void* parameter,
- WeakReferenceCallback weak_reference_callback,
+ RevivableCallback weak_reference_callback,
NearDeathCallback near_death_callback) {
- ASSERT(near_death_callback != NULL);
+ ASSERT((weak_reference_callback == NULL) != (near_death_callback == NULL));
Node::FromLocation(location)->MakeWeak(this,
parameter,
weak_reference_callback,
diff --git a/src/global-handles.h b/src/global-handles.h
index 81e1476..f502dfa 100644
--- a/src/global-handles.h
+++ b/src/global-handles.h
@@ -130,6 +130,8 @@
// Destroy a global handle.
void Destroy(Object** location);
+ typedef WeakReferenceCallbacks<v8::Value, void>::Revivable RevivableCallback;
+
// Make the global handle weak and set the callback parameter for the
// handle. When the garbage collector recognizes that only weak global
// handles point to an object the handles are cleared and the callback
@@ -138,7 +140,7 @@
// reason is that Smi::FromInt(0) does not change during garage collection.
void MakeWeak(Object** location,
void* parameter,
- WeakReferenceCallback weak_reference_callback,
+ RevivableCallback weak_reference_callback,
NearDeathCallback near_death_callback);
void RecordStats(HeapStats* stats);
diff --git a/src/handles-inl.h b/src/handles-inl.h
index 5a3e9ed..4f5e9fe 100644
--- a/src/handles-inl.h
+++ b/src/handles-inl.h
@@ -53,8 +53,9 @@
template <typename T>
inline bool Handle<T>::is_identical_to(const Handle<T> other) const {
- ASSERT(location_ == NULL ||
- reinterpret_cast<Address>(*location_) != kZapValue);
+ ASSERT(location_ == NULL || !(*location_)->IsFailure());
+ if (location_ == other.location_) return true;
+ if (location_ == NULL || other.location_ == NULL) return false;
// Dereferencing deferred handles to check object equality is safe.
SLOW_ASSERT(IsDereferenceAllowed(true) && other.IsDereferenceAllowed(true));
return *location_ == *other.location_;
@@ -63,24 +64,22 @@
template <typename T>
inline T* Handle<T>::operator*() const {
- ASSERT(location_ != NULL);
- ASSERT(reinterpret_cast<Address>(*location_) != kHandleZapValue);
+ ASSERT(location_ != NULL && !(*location_)->IsFailure());
SLOW_ASSERT(IsDereferenceAllowed(false));
return *BitCast<T**>(location_);
}
template <typename T>
inline T** Handle<T>::location() const {
- ASSERT(location_ == NULL ||
- reinterpret_cast<Address>(*location_) != kZapValue);
- SLOW_ASSERT(IsDereferenceAllowed(false));
+ ASSERT(location_ == NULL || !(*location_)->IsFailure());
+ SLOW_ASSERT(location_ == NULL || IsDereferenceAllowed(false));
return location_;
}
#ifdef DEBUG
template <typename T>
bool Handle<T>::IsDereferenceAllowed(bool allow_deferred) const {
- if (location_ == NULL) return true;
+ ASSERT(location_ != NULL);
Object* object = *BitCast<T**>(location_);
if (object->IsSmi()) return true;
HeapObject* heap_object = HeapObject::cast(object);
diff --git a/src/heap-inl.h b/src/heap-inl.h
index f937426..b71978b 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -650,6 +650,10 @@
return old;
}
+inline void Heap::set_allow_allocation(bool allocation_allowed) {
+ allocation_allowed_ = allocation_allowed;
+}
+
#endif
@@ -864,33 +868,41 @@
#ifdef DEBUG
-AssertNoAllocation::AssertNoAllocation() {
- Isolate* isolate = ISOLATE;
- active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
- if (active_) {
- old_state_ = isolate->heap()->allow_allocation(false);
+bool EnterAllocationScope(Isolate* isolate, bool allow_allocation) {
+ bool active = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
+ bool last_state = isolate->heap()->IsAllocationAllowed();
+ if (active) {
+ // TODO(yangguo): Make HandleDereferenceGuard avoid isolate mutation in the
+ // same way if running on the optimizer thread.
+ isolate->heap()->set_allow_allocation(allow_allocation);
+ }
+ return last_state;
+}
+
+
+void ExitAllocationScope(Isolate* isolate, bool last_state) {
+ bool active = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
+ if (active) {
+ isolate->heap()->set_allow_allocation(last_state);
}
}
+AssertNoAllocation::AssertNoAllocation()
+ : last_state_(EnterAllocationScope(ISOLATE, false)) {
+}
+
AssertNoAllocation::~AssertNoAllocation() {
- if (active_) HEAP->allow_allocation(old_state_);
+ ExitAllocationScope(ISOLATE, last_state_);
}
-
-DisableAssertNoAllocation::DisableAssertNoAllocation() {
- Isolate* isolate = ISOLATE;
- active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
- if (active_) {
- old_state_ = isolate->heap()->allow_allocation(true);
- }
+DisableAssertNoAllocation::DisableAssertNoAllocation()
+ : last_state_(EnterAllocationScope(ISOLATE, true)) {
}
-
DisableAssertNoAllocation::~DisableAssertNoAllocation() {
- if (active_) HEAP->allow_allocation(old_state_);
+ ExitAllocationScope(ISOLATE, last_state_);
}
-
#else
AssertNoAllocation::AssertNoAllocation() { }
diff --git a/src/heap-snapshot-generator.cc b/src/heap-snapshot-generator.cc
index 3d890f7..f488304 100644
--- a/src/heap-snapshot-generator.cc
+++ b/src/heap-snapshot-generator.cc
@@ -1309,8 +1309,7 @@
if (js_obj->HasFastProperties()) {
DescriptorArray* descs = js_obj->map()->instance_descriptors();
int real_size = js_obj->map()->NumberOfOwnDescriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- if (descs->GetDetails(i).descriptor_index() > real_size) continue;
+ for (int i = 0; i < real_size; i++) {
switch (descs->GetType(i)) {
case FIELD: {
int index = descs->GetFieldIndex(i);
@@ -1332,7 +1331,7 @@
js_obj->GetInObjectPropertyOffset(index));
}
} else {
- Object* value = js_obj->FastPropertyAt(index);
+ Object* value = js_obj->RawFastPropertyAt(index);
if (k != heap_->hidden_string()) {
SetPropertyReference(js_obj, entry, k, value);
} else {
diff --git a/src/heap.cc b/src/heap.cc
index 33ba3b8..6139080 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -3176,7 +3176,8 @@
MaybeObject* Heap::NumberToString(Object* number,
- bool check_number_string_cache) {
+ bool check_number_string_cache,
+ PretenureFlag pretenure) {
isolate_->counters()->number_to_string_runtime()->Increment();
if (check_number_string_cache) {
Object* cached = GetNumberStringCache(number);
@@ -3197,7 +3198,8 @@
}
Object* js_string;
- MaybeObject* maybe_js_string = AllocateStringFromOneByte(CStrVector(str));
+ MaybeObject* maybe_js_string =
+ AllocateStringFromOneByte(CStrVector(str), pretenure);
if (maybe_js_string->ToObject(&js_string)) {
SetNumberStringCache(number, String::cast(js_string));
}
@@ -4158,7 +4160,7 @@
ASSERT(name->IsInternalizedString());
// TODO(verwaest): Since we cannot update the boilerplate's map yet,
// initialize to the worst case.
- FieldDescriptor field(name, i, NONE, Representation::Tagged(), i + 1);
+ FieldDescriptor field(name, i, NONE, Representation::Tagged());
descriptors->Set(i, &field, witness);
}
descriptors->Sort();
@@ -4338,8 +4340,7 @@
ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
if (to_kind != initial_map->elements_kind()) {
- MaybeObject* maybe_new_map = constructor->GetElementsTransitionMap(
- isolate(), to_kind);
+ MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
// Possibly alter the mode, since we found an updated elements kind
// in the type info cell.
@@ -4587,13 +4588,10 @@
// The global object might be created from an object template with accessors.
// Fill these accessors into the dictionary.
DescriptorArray* descs = map->instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
+ for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
PropertyDetails details = descs->GetDetails(i);
ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
- PropertyDetails d = PropertyDetails(details.attributes(),
- CALLBACKS,
- Representation::None(),
- details.descriptor_index());
+ PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1);
Object* value = descs->GetCallbacksObject(i);
MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
if (!maybe_value->ToObject(&value)) return maybe_value;
diff --git a/src/heap.h b/src/heap.h
index 7722079..add42c0 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1476,6 +1476,7 @@
#ifdef DEBUG
bool IsAllocationAllowed() { return allocation_allowed_; }
+ inline void set_allow_allocation(bool allocation_allowed);
inline bool allow_allocation(bool enable);
bool disallow_allocation_failure() {
@@ -1530,6 +1531,14 @@
return new_space_high_promotion_mode_active_;
}
+ inline PretenureFlag GetPretenureMode() {
+ return new_space_high_promotion_mode_active_ ? TENURED : NOT_TENURED;
+ }
+
+ inline Address* NewSpaceHighPromotionModeActiveAddress() {
+ return reinterpret_cast<Address*>(&new_space_high_promotion_mode_active_);
+ }
+
inline intptr_t PromotedTotalSize() {
return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
}
@@ -1608,7 +1617,8 @@
static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
MUST_USE_RESULT MaybeObject* NumberToString(
- Object* number, bool check_number_string_cache = true);
+ Object* number, bool check_number_string_cache = true,
+ PretenureFlag pretenure = NOT_TENURED);
MUST_USE_RESULT MaybeObject* Uint32ToString(
uint32_t value, bool check_number_string_cache = true);
@@ -1975,7 +1985,8 @@
// Indicates that the new space should be kept small due to high promotion
// rates caused by the mutator allocating a lot of long-lived objects.
- bool new_space_high_promotion_mode_active_;
+ // TODO(hpayer): change to bool if no longer accessed from generated code
+ intptr_t new_space_high_promotion_mode_active_;
// Limit that triggers a global GC on the next (normally caused) GC. This
// is checked when we have already decided to do a GC to help determine
@@ -2691,6 +2702,13 @@
// { AssertNoAllocation nogc;
// ...
// }
+
+#ifdef DEBUG
+inline bool EnterAllocationScope(Isolate* isolate, bool allow_allocation);
+inline void ExitAllocationScope(Isolate* isolate, bool last_state);
+#endif
+
+
class AssertNoAllocation {
public:
inline AssertNoAllocation();
@@ -2698,8 +2716,7 @@
#ifdef DEBUG
private:
- bool old_state_;
- bool active_;
+ bool last_state_;
#endif
};
@@ -2711,8 +2728,7 @@
#ifdef DEBUG
private:
- bool old_state_;
- bool active_;
+ bool last_state_;
#endif
};
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 70e2395..8f8c59e 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -1601,15 +1601,6 @@
}
-void HLoadElements::PrintDataTo(StringStream* stream) {
- value()->PrintNameTo(stream);
- if (HasTypeCheck()) {
- stream->Add(" ");
- typecheck()->PrintNameTo(stream);
- }
-}
-
-
void HCheckMaps::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" [%p", *map_set()->first());
@@ -1958,6 +1949,10 @@
void HPhi::InitRealUses(int phi_id) {
// Initialize real uses.
phi_id_ = phi_id;
+ // Compute a conservative approximation of truncating uses before inferring
+ // representations. The proper, exact computation will be done later, when
+ // inserting representation changes.
+ SetFlag(kTruncatingToInt32);
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* value = it.value();
if (!value->IsPhi()) {
@@ -1967,6 +1962,9 @@
PrintF("#%d Phi is used by real #%d %s as %s\n",
id(), value->id(), value->Mnemonic(), rep.Mnemonic());
}
+ if (!value->IsSimulate() && !value->CheckFlag(kTruncatingToInt32)) {
+ ClearFlag(kTruncatingToInt32);
+ }
}
}
}
@@ -2523,6 +2521,8 @@
i < types->length() && types_.length() < kMaxLoadPolymorphism;
++i) {
Handle<Map> map = types->at(i);
+ // Deprecated maps are updated to the current map in the type oracle.
+ ASSERT(!map->is_deprecated());
LookupResult lookup(map->GetIsolate());
map->LookupDescriptor(NULL, *name, &lookup);
if (lookup.IsFound()) {
@@ -2534,6 +2534,12 @@
} else {
SetGVNFlag(kDependsOnBackingStoreFields);
}
+ if (FLAG_track_double_fields &&
+ lookup.representation().IsDouble()) {
+ // Since the value needs to be boxed, use a generic handler for
+ // loading doubles.
+ continue;
+ }
types_.Add(types->at(i), zone);
break;
}
@@ -3507,14 +3513,7 @@
void HPhi::InferRepresentation(HInferRepresentation* h_infer) {
ASSERT(CheckFlag(kFlexibleRepresentation));
- // If there are non-Phi uses, and all of them have observed the same
- // representation, than that's what this Phi is going to use.
- Representation new_rep = RepresentationObservedByAllNonPhiUses();
- if (!new_rep.IsNone()) {
- UpdateRepresentation(new_rep, h_infer, "unanimous use observations");
- return;
- }
- new_rep = RepresentationFromInputs();
+ Representation new_rep = RepresentationFromInputs();
UpdateRepresentation(new_rep, h_infer, "inputs");
new_rep = RepresentationFromUses();
UpdateRepresentation(new_rep, h_infer, "uses");
@@ -3523,22 +3522,6 @@
}
-Representation HPhi::RepresentationObservedByAllNonPhiUses() {
- int non_phi_use_count = 0;
- for (int i = Representation::kInteger32;
- i < Representation::kNumRepresentations; ++i) {
- non_phi_use_count += non_phi_uses_[i];
- }
- if (non_phi_use_count <= 1) return Representation::None();
- for (int i = 0; i < Representation::kNumRepresentations; ++i) {
- if (non_phi_uses_[i] == non_phi_use_count) {
- return Representation::FromKind(static_cast<Representation::Kind>(i));
- }
- }
- return Representation::None();
-}
-
-
Representation HPhi::RepresentationFromInputs() {
bool double_occurred = false;
bool int32_occurred = false;
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index c09f261..3ea99d4 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -135,7 +135,6 @@
V(IsUndetectableAndBranch) \
V(LeaveInlined) \
V(LoadContextSlot) \
- V(LoadElements) \
V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
@@ -207,6 +206,7 @@
V(Calls) \
V(InobjectFields) \
V(BackingStoreFields) \
+ V(DoubleFields) \
V(ElementsKind) \
V(ElementsPointer) \
V(ArrayElements) \
@@ -2353,15 +2353,20 @@
Handle<JSGlobalPropertyCell> type_cell)
: HCallNew(context, constructor, argument_count),
type_cell_(type_cell) {
+ elements_kind_ = static_cast<ElementsKind>(
+ Smi::cast(type_cell->value())->value());
}
Handle<JSGlobalPropertyCell> property_cell() const {
return type_cell_;
}
+ ElementsKind elements_kind() const { return elements_kind_; }
+
DECLARE_CONCRETE_INSTRUCTION(CallNewArray)
private:
+ ElementsKind elements_kind_;
Handle<JSGlobalPropertyCell> type_cell_;
};
@@ -2585,39 +2590,6 @@
};
-class HLoadElements: public HTemplateInstruction<2> {
- public:
- HLoadElements(HValue* value, HValue* typecheck) {
- SetOperandAt(0, value);
- SetOperandAt(1, typecheck != NULL ? typecheck : value);
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnElementsPointer);
- }
-
- HValue* value() { return OperandAt(0); }
- HValue* typecheck() {
- ASSERT(HasTypeCheck());
- return OperandAt(1);
- }
- bool HasTypeCheck() const { return OperandAt(0) != OperandAt(1); }
-
- virtual void PrintDataTo(StringStream* stream);
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadElements)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-
- private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
class HLoadExternalArrayPointer: public HUnaryOperation {
public:
explicit HLoadExternalArrayPointer(HValue* value)
@@ -3013,7 +2985,6 @@
virtual Range* InferRange(Zone* zone);
virtual void InferRepresentation(HInferRepresentation* h_infer);
- Representation RepresentationObservedByAllNonPhiUses();
Representation RepresentationFromUseRequirements();
virtual Representation RequiredInputRepresentation(int index) {
return representation();
@@ -3392,16 +3363,27 @@
HValue* left() { return OperandAt(1); }
HValue* right() { return OperandAt(2); }
- // TODO(kasperl): Move these helpers to the IA-32 Lithium
- // instruction sequence builder.
- HValue* LeastConstantOperand() {
- if (IsCommutative() && left()->IsConstant()) return right();
- return left();
+ // True if switching left and right operands likely generates better code.
+ bool AreOperandsBetterSwitched() {
+ if (!IsCommutative()) return false;
+
+ // Constant operands are better off on the right, they can be inlined in
+ // many situations on most platforms.
+ if (left()->IsConstant()) return true;
+ if (right()->IsConstant()) return false;
+
+ // Otherwise, if there is only one use of the right operand, it would be
+ // better off on the left for platforms that only have 2-arg arithmetic
+ // ops (e.g ia32, x64) that clobber the left operand.
+ return (right()->UseCount() == 1);
}
- HValue* MostConstantOperand() {
- if (IsCommutative() && left()->IsConstant()) return left();
- return right();
+ HValue* BetterLeftOperand() {
+ return AreOperandsBetterSwitched() ? right() : left();
+ }
+
+ HValue* BetterRightOperand() {
+ return AreOperandsBetterSwitched() ? left() : right();
}
void set_observed_input_representation(int index, Representation rep) {
@@ -5242,11 +5224,16 @@
set_representation(Representation::Tagged());
}
SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- if (is_in_object) {
+ if (FLAG_track_double_fields && representation().IsDouble()) {
+ ASSERT(is_in_object);
+ ASSERT(offset == HeapNumber::kValueOffset);
+ SetGVNFlag(kDependsOnDoubleFields);
+ } else if (is_in_object) {
SetGVNFlag(kDependsOnInobjectFields);
+ SetGVNFlag(kDependsOnMaps);
} else {
SetGVNFlag(kDependsOnBackingStoreFields);
+ SetGVNFlag(kDependsOnMaps);
}
}
@@ -5586,7 +5573,7 @@
class HStoreNamedField: public HTemplateInstruction<2> {
public:
HStoreNamedField(HValue* obj,
- Handle<String> name,
+ Handle<Name> name,
HValue* val,
bool in_object,
Representation field_representation,
@@ -5600,18 +5587,25 @@
SetOperandAt(0, obj);
SetOperandAt(1, val);
SetFlag(kTrackSideEffectDominators);
- SetGVNFlag(kDependsOnNewSpacePromotion);
- if (is_in_object_) {
+ if (FLAG_track_double_fields && field_representation.IsDouble()) {
+ SetGVNFlag(kChangesDoubleFields);
+ } else if (is_in_object_) {
SetGVNFlag(kChangesInobjectFields);
+ SetGVNFlag(kDependsOnNewSpacePromotion);
} else {
SetGVNFlag(kChangesBackingStoreFields);
+ SetGVNFlag(kDependsOnNewSpacePromotion);
}
}
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
virtual Representation RequiredInputRepresentation(int index) {
- if (FLAG_track_fields && index == 1 && field_representation_.IsSmi()) {
+ if (FLAG_track_double_fields &&
+ index == 1 && field_representation_.IsDouble()) {
+ return field_representation_;
+ } else if (FLAG_track_fields &&
+ index == 1 && field_representation_.IsSmi()) {
return Representation::Integer32();
}
return Representation::Tagged();
@@ -5625,7 +5619,7 @@
HValue* object() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
- Handle<String> name() const { return name_; }
+ Handle<Name> name() const { return name_; }
bool is_in_object() const { return is_in_object_; }
int offset() const { return offset_; }
Handle<Map> transition() const { return transition_; }
@@ -5634,7 +5628,11 @@
HValue* new_space_dominator() const { return new_space_dominator_; }
bool NeedsWriteBarrier() {
+ ASSERT(!(FLAG_track_double_fields && field_representation_.IsDouble()) ||
+ transition_.is_null());
return (!FLAG_track_fields || !field_representation_.IsSmi()) &&
+ // If there is a transition, a new storage object needs to be allocated.
+ !(FLAG_track_double_fields && field_representation_.IsDouble()) &&
StoringValueNeedsWriteBarrier(value()) &&
ReceiverObjectNeedsWriteBarrier(object(), new_space_dominator());
}
@@ -5652,7 +5650,7 @@
}
private:
- Handle<String> name_;
+ Handle<Name> name_;
bool is_in_object_;
Representation field_representation_;
int offset_;
@@ -6141,12 +6139,14 @@
bool fast_elements,
int literal_index,
int depth,
+ bool may_store_doubles,
bool has_function)
: HMaterializedLiteral<1>(literal_index, depth),
constant_properties_(constant_properties),
constant_properties_length_(constant_properties->length()),
literals_(literals),
fast_elements_(fast_elements),
+ may_store_doubles_(may_store_doubles),
has_function_(has_function) {
SetOperandAt(0, context);
SetGVNFlag(kChangesNewSpacePromotion);
@@ -6161,6 +6161,7 @@
}
Handle<FixedArray> literals() const { return literals_; }
bool fast_elements() const { return fast_elements_; }
+ bool may_store_doubles() const { return may_store_doubles_; }
bool has_function() const { return has_function_; }
virtual Representation RequiredInputRepresentation(int index) {
@@ -6175,6 +6176,7 @@
int constant_properties_length_;
Handle<FixedArray> literals_;
bool fast_elements_ : 1;
+ bool may_store_doubles_ : 1;
bool has_function_ : 1;
};
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index d131220..5c573fe 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -1271,8 +1271,7 @@
}
bool fast_smi_only_elements = IsFastSmiElementsKind(elements_kind);
bool fast_elements = IsFastObjectElementsKind(elements_kind);
- HValue* elements =
- AddInstruction(new(zone) HLoadElements(object, mapcheck));
+ HValue* elements = AddLoadElements(object, mapcheck);
if (is_store && (fast_elements || fast_smi_only_elements) &&
store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
HCheckMaps* check_cow_map = HCheckMaps::New(
@@ -1521,6 +1520,18 @@
}
+HLoadNamedField* HGraphBuilder::AddLoadElements(HValue* object,
+ HValue* typecheck) {
+ HLoadNamedField* instr = new(zone()) HLoadNamedField(object, true,
+ Representation::Tagged(), JSObject::kElementsOffset, typecheck);
+ AddInstruction(instr);
+ instr->SetGVNFlag(kDependsOnElementsPointer);
+ instr->ClearGVNFlag(kDependsOnMaps);
+ instr->ClearGVNFlag(kDependsOnInobjectFields);
+ return instr;
+}
+
+
HValue* HGraphBuilder::BuildNewElementsCapacity(HValue* context,
HValue* old_capacity) {
Zone* zone = this->zone();
@@ -1744,8 +1755,7 @@
if (length > 0) {
// Get hold of the elements array of the boilerplate and setup the
// elements pointer in the resulting object.
- HValue* boilerplate_elements =
- AddInstruction(new(zone) HLoadElements(boilerplate, NULL));
+ HValue* boilerplate_elements = AddLoadElements(boilerplate);
HValue* object_elements =
AddInstruction(new(zone) HInnerAllocatedObject(object, elems_offset));
AddInstruction(new(zone) HStoreNamedField(object,
@@ -1853,6 +1863,26 @@
}
+HInstruction* HGraphBuilder::BuildGetNativeContext(HValue* context) {
+ HInstruction* global_object = AddInstruction(new(zone())
+ HGlobalObject(context));
+ HInstruction* native_context = AddInstruction(new(zone())
+ HLoadNamedField(global_object, true, Representation::Tagged(),
+ GlobalObject::kNativeContextOffset));
+ return native_context;
+}
+
+
+HInstruction* HGraphBuilder::BuildGetArrayFunction(HValue* context) {
+ HInstruction* native_context = BuildGetNativeContext(context);
+ int offset = Context::kHeaderSize +
+ kPointerSize * Context::ARRAY_FUNCTION_INDEX;
+ HInstruction* array_function = AddInstruction(new(zone())
+ HLoadNamedField(native_context, true, Representation::Tagged(), offset));
+ return array_function;
+}
+
+
HGraphBuilder::JSArrayBuilder::JSArrayBuilder(HGraphBuilder* builder,
ElementsKind kind,
HValue* allocation_site_payload,
@@ -1869,12 +1899,7 @@
HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode(HValue* context) {
- // Get the global context, the native context, the map array
- HInstruction* global_object = AddInstruction(new(zone())
- HGlobalObject(context));
- HInstruction* native_context = AddInstruction(new(zone())
- HLoadNamedField(global_object, true, Representation::Tagged(),
- GlobalObject::kNativeContextOffset));
+ HInstruction* native_context = builder()->BuildGetNativeContext(context);
int offset = Context::kHeaderSize +
kPointerSize * Context::JS_ARRAY_MAPS_INDEX;
HInstruction* map_array = AddInstruction(new(zone())
@@ -3763,7 +3788,39 @@
}
}
+ // Set truncation flags for groups of connected phis. This is a conservative
+ // approximation; the flag will be properly re-computed after representations
+ // have been determined.
+ if (phi_count > 0) {
+ BitVector* done = new(zone()) BitVector(phi_count, graph_->zone());
+ for (int i = 0; i < phi_count; ++i) {
+ if (done->Contains(i)) continue;
+
+ // Check if all uses of all connected phis in this group are truncating.
+ bool all_uses_everywhere_truncating = true;
+ for (BitVector::Iterator it(connected_phis.at(i));
+ !it.Done();
+ it.Advance()) {
+ int index = it.Current();
+ all_uses_everywhere_truncating &=
+ phi_list->at(index)->CheckFlag(HInstruction::kTruncatingToInt32);
+ done->Add(index);
+ }
+ if (all_uses_everywhere_truncating) {
+ continue; // Great, nothing to do.
+ }
+ // Clear truncation flag of this group of connected phis.
+ for (BitVector::Iterator it(connected_phis.at(i));
+ !it.Done();
+ it.Advance()) {
+ int index = it.Current();
+ phi_list->at(index)->ClearFlag(HInstruction::kTruncatingToInt32);
+ }
+ }
+ }
+
// Simplify constant phi inputs where possible.
+ // This step uses kTruncatingToInt32 flags of phis.
for (int i = 0; i < phi_count; ++i) {
phi_list->at(i)->SimplifyConstantInputs();
}
@@ -4043,36 +4100,50 @@
// int32-phis allow truncation and iteratively remove the ones that
// are used in an operation that does not allow a truncating
// conversion.
- // TODO(fschneider): Replace this with a worklist-based iteration.
+ ZoneList<HPhi*> worklist(8, zone());
+
for (int i = 0; i < phi_list()->length(); i++) {
HPhi* phi = phi_list()->at(i);
if (phi->representation().IsInteger32()) {
phi->SetFlag(HValue::kTruncatingToInt32);
}
}
- bool change = true;
- while (change) {
- change = false;
- for (int i = 0; i < phi_list()->length(); i++) {
- HPhi* phi = phi_list()->at(i);
- if (!phi->CheckFlag(HValue::kTruncatingToInt32)) continue;
- for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
- // If a Phi is used as a non-truncating int32 or as a double,
- // clear its "truncating" flag.
- HValue* use = it.value();
- Representation input_representation =
- use->RequiredInputRepresentation(it.index());
- if ((input_representation.IsInteger32() &&
- !use->CheckFlag(HValue::kTruncatingToInt32)) ||
- input_representation.IsDouble()) {
- if (FLAG_trace_representation) {
- PrintF("#%d Phi is not truncating because of #%d %s\n",
- phi->id(), it.value()->id(), it.value()->Mnemonic());
- }
- phi->ClearFlag(HValue::kTruncatingToInt32);
- change = true;
- break;
+
+ for (int i = 0; i < phi_list()->length(); i++) {
+ HPhi* phi = phi_list()->at(i);
+ for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+ // If a Phi is used as a non-truncating int32 or as a double,
+ // clear its "truncating" flag.
+ HValue* use = it.value();
+ Representation input_representation =
+ use->RequiredInputRepresentation(it.index());
+ if ((input_representation.IsInteger32() &&
+ !use->CheckFlag(HValue::kTruncatingToInt32)) ||
+ input_representation.IsDouble()) {
+ if (FLAG_trace_representation) {
+ PrintF("#%d Phi is not truncating because of #%d %s\n",
+ phi->id(), it.value()->id(), it.value()->Mnemonic());
}
+ phi->ClearFlag(HValue::kTruncatingToInt32);
+ worklist.Add(phi, zone());
+ break;
+ }
+ }
+ }
+
+ while (!worklist.is_empty()) {
+ HPhi* current = worklist.RemoveLast();
+ for (int i = 0; i < current->OperandCount(); ++i) {
+ HValue* input = current->OperandAt(i);
+ if (input->IsPhi() &&
+ input->representation().IsInteger32() &&
+ input->CheckFlag(HValue::kTruncatingToInt32)) {
+ if (FLAG_trace_representation) {
+ PrintF("#%d Phi is not truncating because of #%d %s\n",
+ input->id(), current->id(), current->Mnemonic());
+ }
+ input->ClearFlag(HValue::kTruncatingToInt32);
+ worklist.Add(HPhi::cast(input), zone());
}
}
}
@@ -5380,6 +5451,9 @@
while (!worklist.is_empty()) {
HInstruction* instr = worklist.RemoveLast();
+ // This happens when an instruction is used multiple times as operand. That
+ // in turn could happen through GVN.
+ if (!instr->IsLinked()) continue;
if (FLAG_trace_dead_code_elimination) {
HeapStringAllocator allocator;
StringStream stream(&allocator);
@@ -6620,10 +6694,16 @@
if (properties->length() > 0) {
return false;
} else {
- int nof = boilerplate->map()->inobject_properties();
- for (int i = 0; i < nof; i++) {
+ Handle<DescriptorArray> descriptors(
+ boilerplate->map()->instance_descriptors());
+ int limit = boilerplate->map()->NumberOfOwnDescriptors();
+ for (int i = 0; i < limit; i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.type() != FIELD) continue;
+ Representation representation = details.representation();
+ int index = descriptors->GetFieldIndex(i);
if ((*max_properties)-- == 0) return false;
- Handle<Object> value(boilerplate->InObjectPropertyAt(i), isolate);
+ Handle<Object> value(boilerplate->InObjectPropertyAt(index), isolate);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
if (!IsFastLiteral(value_object,
@@ -6633,6 +6713,8 @@
pointer_size)) {
return false;
}
+ } else if (representation.IsDouble()) {
+ *data_size += HeapNumber::kSize;
}
}
}
@@ -6682,6 +6764,7 @@
expr->fast_elements(),
expr->literal_index(),
expr->depth(),
+ expr->may_store_doubles(),
expr->has_function()));
}
@@ -6830,7 +6913,7 @@
// of the property values and is the value of the entire expression.
Push(literal);
- HLoadElements* elements = NULL;
+ HInstruction* elements = NULL;
for (int i = 0; i < length; i++) {
Expression* subexpr = subexprs->at(i);
@@ -6842,10 +6925,7 @@
HValue* value = Pop();
if (!Smi::IsValid(i)) return Bailout("Non-smi key in array literal");
- // Pass in literal as dummy depedency, since the receiver always has
- // elements.
- elements = new(zone()) HLoadElements(literal, literal);
- AddInstruction(elements);
+ elements = AddLoadElements(literal);
HValue* key = AddInstruction(
new(zone()) HConstant(Handle<Object>(Smi::FromInt(i), isolate()),
@@ -6991,9 +7071,33 @@
} else {
offset += FixedArray::kHeaderSize;
}
+ bool transition_to_field = lookup->IsTransitionToField(*map);
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (transition_to_field) {
+ NoObservableSideEffectsScope no_side_effects(this);
+ HInstruction* heap_number_size = AddInstruction(new(zone()) HConstant(
+ HeapNumber::kSize, Representation::Integer32()));
+ HInstruction* double_box = AddInstruction(new(zone()) HAllocate(
+ environment()->LookupContext(), heap_number_size,
+ HType::HeapNumber(), HAllocate::CAN_ALLOCATE_IN_NEW_SPACE));
+ BuildStoreMap(double_box, isolate()->factory()->heap_number_map());
+ AddInstruction(new(zone()) HStoreNamedField(
+ double_box, name, value, true,
+ Representation::Double(), HeapNumber::kValueOffset));
+ value = double_box;
+ representation = Representation::Tagged();
+ } else {
+ HInstruction* double_box = AddInstruction(new(zone()) HLoadNamedField(
+ object, is_in_object, Representation::Tagged(), offset));
+ double_box->set_type(HType::HeapNumber());
+ return new(zone()) HStoreNamedField(
+ double_box, name, value, true,
+ Representation::Double(), HeapNumber::kValueOffset);
+ }
+ }
HStoreNamedField* instr = new(zone()) HStoreNamedField(
object, name, value, is_in_object, representation, offset);
- if (lookup->IsTransitionToField(*map)) {
+ if (transition_to_field) {
Handle<Map> transition(lookup->GetTransitionMapFromMap(*map));
instr->set_transition(transition);
// TODO(fschneider): Record the new map type of the object in the IR to
@@ -7075,22 +7179,31 @@
HValue* object,
SmallMapList* types,
Handle<String> name) {
- int count = 0;
- int previous_field_offset = 0;
- bool previous_field_is_in_object = false;
- bool is_monomorphic_field = true;
if (HandlePolymorphicArrayLengthLoad(expr, object, types, name))
return;
- Handle<Map> map;
- LookupResult lookup(isolate());
- for (int i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
- map = types->at(i);
- if (ComputeLoadStoreField(map, name, &lookup, false)) {
+ AddInstruction(new(zone()) HCheckNonSmi(object));
+
+ // Use monomorphic load if property lookup results in the same field index
+ // for all maps. Requires special map check on the set of all handled maps.
+ HInstruction* instr = NULL;
+ if (types->length() > 0 && types->length() <= kMaxLoadPolymorphism) {
+ LookupResult lookup(isolate());
+ int previous_field_offset = 0;
+ bool previous_field_is_in_object = false;
+ Representation representation = Representation::None();
+ int count;
+ for (count = 0; count < types->length(); ++count) {
+ Handle<Map> map = types->at(count);
+ if (!ComputeLoadStoreField(map, name, &lookup, false)) break;
+
int index = ComputeLoadStoreFieldIndex(map, &lookup);
+ Representation new_representation =
+ ComputeLoadStoreRepresentation(map, &lookup);
bool is_in_object = index < 0;
int offset = index * kPointerSize;
+
if (index < 0) {
// Negative property indices are in-object properties, indexed
// from the end of the fixed part of the object.
@@ -7098,31 +7211,33 @@
} else {
offset += FixedArray::kHeaderSize;
}
+
if (count == 0) {
previous_field_offset = offset;
previous_field_is_in_object = is_in_object;
- } else if (is_monomorphic_field) {
- is_monomorphic_field = (offset == previous_field_offset) &&
- (is_in_object == previous_field_is_in_object);
+ representation = new_representation;
+ } else if (offset != previous_field_offset ||
+ is_in_object != previous_field_is_in_object ||
+ (FLAG_track_fields &&
+ !representation.IsCompatibleForLoad(new_representation))) {
+ break;
}
- ++count;
+
+ representation = representation.generalize(new_representation);
+ }
+
+ if (count == types->length()) {
+ AddInstruction(HCheckMaps::New(object, types, zone()));
+ instr = DoBuildLoadNamedField(
+ object, previous_field_is_in_object,
+ representation, previous_field_offset);
}
}
- // Use monomorphic load if property lookup results in the same field index
- // for all maps. Requires special map check on the set of all handled maps.
- AddInstruction(new(zone()) HCheckNonSmi(object));
- HInstruction* instr;
- if (count == types->length() && is_monomorphic_field) {
- AddInstruction(HCheckMaps::New(object, types, zone()));
- instr = BuildLoadNamedField(object, map, &lookup);
- } else {
+ if (instr == NULL) {
HValue* context = environment()->LookupContext();
- instr = new(zone()) HLoadNamedFieldPolymorphic(context,
- object,
- types,
- name,
- zone());
+ instr = new(zone()) HLoadNamedFieldPolymorphic(
+ context, object, types, name, zone());
}
instr->set_position(expr->position());
@@ -7271,14 +7386,15 @@
// Keyed store.
CHECK_ALIVE(VisitForValue(prop->key()));
CHECK_ALIVE(VisitForValue(expr->value()));
- HValue* value = Pop();
- HValue* key = Pop();
- HValue* object = Pop();
+ HValue* value = environment()->ExpressionStackAt(0);
+ HValue* key = environment()->ExpressionStackAt(1);
+ HValue* object = environment()->ExpressionStackAt(2);
bool has_side_effects = false;
HandleKeyedElementAccess(object, key, value, expr, expr->AssignmentId(),
expr->position(),
true, // is_store
&has_side_effects);
+ Drop(3);
Push(value);
AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
return ast_context()->ReturnValue(Pop());
@@ -7679,18 +7795,38 @@
HValue* object,
Handle<Map> map,
LookupResult* lookup) {
- Representation representation = lookup->representation();
int index = lookup->GetLocalFieldIndexFromMap(*map);
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- int offset = (index * kPointerSize) + map->instance_size();
- return new(zone()) HLoadNamedField(object, true, representation, offset);
- } else {
- // Non-negative property indices are in the properties array.
- int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
- return new(zone()) HLoadNamedField(object, false, representation, offset);
+ // Negative property indices are in-object properties, indexed from the end of
+ // the fixed part of the object. Non-negative property indices are in the
+ // properties array.
+ int inobject = index < 0;
+ Representation representation = lookup->representation();
+ int offset = inobject
+ ? index * kPointerSize + map->instance_size()
+ : index * kPointerSize + FixedArray::kHeaderSize;
+ return DoBuildLoadNamedField(object, inobject, representation, offset);
+}
+
+
+HLoadNamedField* HGraphBuilder::DoBuildLoadNamedField(
+ HValue* object,
+ bool inobject,
+ Representation representation,
+ int offset) {
+ bool load_double = false;
+ if (representation.IsDouble()) {
+ representation = Representation::Tagged();
+ load_double = FLAG_track_double_fields;
}
+ HLoadNamedField* field =
+ new(zone()) HLoadNamedField(object, inobject, representation, offset);
+ if (load_double) {
+ AddInstruction(field);
+ field->set_type(HType::HeapNumber());
+ return new(zone()) HLoadNamedField(
+ field, true, Representation::Double(), HeapNumber::kValueOffset);
+ }
+ return field;
}
@@ -7960,8 +8096,7 @@
HInstruction* elements_kind_instr =
AddInstruction(new(zone()) HElementsKind(object));
- HInstruction* elements =
- AddInstruction(new(zone()) HLoadElements(object, checkspec));
+ HInstruction* elements = AddLoadElements(object, checkspec);
HLoadExternalArrayPointer* external_elements = NULL;
HInstruction* checked_key = NULL;
@@ -9588,16 +9723,13 @@
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
HCallNew* call;
if (use_call_new_array) {
- AddInstruction(new(zone()) HCheckFunction(constructor,
- Handle<JSFunction>(isolate()->global_context()->array_function())));
- Handle<Object> feedback = oracle()->GetInfo(expr->CallNewFeedbackId());
- ASSERT(feedback->IsSmi());
-
// TODO(mvstanton): It would be better to use the already created global
// property cell that is shared by full code gen. That way, any transition
// information that happened after crankshaft won't be lost. The right
// way to do that is to begin passing the cell to the type feedback oracle
// instead of just the value in the cell. Do this in a follow-up checkin.
+ Handle<Object> feedback = oracle()->GetInfo(expr->CallNewFeedbackId());
+ ASSERT(feedback->IsSmi());
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(feedback);
@@ -10010,7 +10142,7 @@
if (has_side_effects) AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
after = BuildIncrement(returns_original_input, expr);
- input = Pop();
+ input = environment()->ExpressionStackAt(0);
expr->RecordTypeFeedback(oracle(), zone());
HandleKeyedElementAccess(obj, key, after, expr, expr->AssignmentId(),
@@ -10018,10 +10150,10 @@
true, // is_store
&has_side_effects);
- // Drop the key from the bailout environment. Overwrite the receiver
- // with the result of the operation, and the placeholder with the
- // original value if necessary.
- Drop(1);
+ // Drop the key and the original value from the bailout environment.
+ // Overwrite the receiver with the result of the operation, and the
+ // placeholder with the original value if necessary.
+ Drop(2);
environment()->SetExpressionStackAt(0, after);
if (returns_original_input) environment()->SetExpressionStackAt(1, input);
ASSERT(has_side_effects); // Stores always have side effects.
@@ -10693,7 +10825,6 @@
elements->map() != isolate()->heap()->fixed_cow_array_map()) ?
elements->Size() : 0;
int elements_offset = *offset + object_size;
- int inobject_properties = boilerplate_object->map()->inobject_properties();
if (create_allocation_site_info) {
elements_offset += AllocationSiteInfo::kSize;
*offset += AllocationSiteInfo::kSize;
@@ -10707,32 +10838,49 @@
// Copy in-object properties.
HValue* object_properties =
AddInstruction(new(zone) HInnerAllocatedObject(target, object_offset));
- for (int i = 0; i < inobject_properties; i++) {
+
+ Handle<DescriptorArray> descriptors(
+ boilerplate_object->map()->instance_descriptors());
+ int limit = boilerplate_object->map()->NumberOfOwnDescriptors();
+
+ for (int i = 0; i < limit; i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.type() != FIELD) continue;
+ int index = descriptors->GetFieldIndex(i);
+ int property_offset = boilerplate_object->GetInObjectPropertyOffset(index);
+ Handle<Name> name(descriptors->GetKey(i));
Handle<Object> value =
- Handle<Object>(boilerplate_object->InObjectPropertyAt(i),
+ Handle<Object>(boilerplate_object->InObjectPropertyAt(index),
isolate());
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
Handle<JSObject> original_value_object = Handle<JSObject>::cast(
- Handle<Object>(original_boilerplate_object->InObjectPropertyAt(i),
+ Handle<Object>(original_boilerplate_object->InObjectPropertyAt(index),
isolate()));
HInstruction* value_instruction =
AddInstruction(new(zone) HInnerAllocatedObject(target, *offset));
- // TODO(verwaest): choose correct storage.
AddInstruction(new(zone) HStoreNamedField(
- object_properties, factory->unknown_field_string(), value_instruction,
- true, Representation::Tagged(),
- boilerplate_object->GetInObjectPropertyOffset(i)));
+ object_properties, name, value_instruction, true,
+ Representation::Tagged(), property_offset));
BuildEmitDeepCopy(value_object, original_value_object, target,
offset, DONT_TRACK_ALLOCATION_SITE);
} else {
- // TODO(verwaest): choose correct storage.
+ Representation representation = details.representation();
HInstruction* value_instruction = AddInstruction(new(zone) HConstant(
value, Representation::Tagged()));
+ if (representation.IsDouble()) {
+ HInstruction* double_box =
+ AddInstruction(new(zone) HInnerAllocatedObject(target, *offset));
+ BuildStoreMap(double_box, factory->heap_number_map());
+ AddInstruction(new(zone) HStoreNamedField(
+ double_box, name, value_instruction, true,
+ Representation::Double(), HeapNumber::kValueOffset));
+ value_instruction = double_box;
+ *offset += HeapNumber::kSize;
+ }
AddInstruction(new(zone) HStoreNamedField(
- object_properties, factory->unknown_field_string(), value_instruction,
- true, Representation::Tagged(),
- boilerplate_object->GetInObjectPropertyOffset(i)));
+ object_properties, name, value_instruction, true,
+ Representation::Tagged(), property_offset));
}
}
diff --git a/src/hydrogen.h b/src/hydrogen.h
index ab721bd..a95424a 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -982,6 +982,11 @@
HValue* BuildCheckMap(HValue* obj, Handle<Map> map);
// Building common constructs
+ HLoadNamedField* DoBuildLoadNamedField(HValue* object,
+ bool inobject,
+ Representation representation,
+ int offset);
+
HInstruction* BuildExternalArrayElementAccess(
HValue* external_elements,
HValue* checked_key,
@@ -1025,6 +1030,8 @@
HInstruction* BuildStoreMap(HValue* object, HValue* map);
HInstruction* BuildStoreMap(HValue* object, Handle<Map> map);
+ HLoadNamedField* AddLoadElements(HValue *object, HValue *typecheck = NULL);
+
class IfBuilder {
public:
explicit IfBuilder(HGraphBuilder* builder,
@@ -1317,6 +1324,9 @@
int previous_object_size,
HValue* payload);
+ HInstruction* BuildGetNativeContext(HValue* context);
+ HInstruction* BuildGetArrayFunction(HValue* context);
+
private:
HGraphBuilder();
CompilationInfo* info_;
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 62e90e3..2897234 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -80,6 +80,28 @@
}
+void LoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -108,9 +130,10 @@
int constant_stack_parameter_count) {
// register state
// eax -- number of arguments
+ // edi -- function
// ebx -- type info cell with elements kind
- static Register registers[] = { ebx };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { edi, ebx };
+ descriptor->register_param_count_ = 2;
if (constant_stack_parameter_count != 0) {
// stack param count needs (constructor pointer, and single argument)
@@ -3299,12 +3322,6 @@
}
-void LoadFieldStub::Generate(MacroAssembler* masm) {
- StubCompiler::DoGenerateFastPropertyLoad(masm, eax, reg_, inobject_, index_);
- __ ret(0);
-}
-
-
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The key is in edx and the parameter count is in eax.
@@ -4759,6 +4776,7 @@
Handle<Object> terminal_kind_sentinel =
TypeFeedbackCells::MonomorphicArraySentinel(isolate,
LAST_FAST_ELEMENTS_KIND);
+ __ JumpIfNotSmi(ecx, &miss);
__ cmp(ecx, Immediate(terminal_kind_sentinel));
__ j(above, &miss);
// Load the global or builtins object from the current context
@@ -5822,8 +5840,33 @@
__ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
__ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
+
+ Label skip_write_barrier, after_writing;
+ ExternalReference high_promotion_mode = ExternalReference::
+ new_space_high_promotion_mode_active_address(masm->isolate());
+ __ test(Operand::StaticVariable(high_promotion_mode), Immediate(1));
+ __ j(zero, &skip_write_barrier);
+
+ __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
+ __ RecordWriteField(ecx,
+ ConsString::kFirstOffset,
+ eax,
+ ebx,
+ kDontSaveFPRegs);
+ __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
+ __ RecordWriteField(ecx,
+ ConsString::kSecondOffset,
+ edx,
+ ebx,
+ kDontSaveFPRegs);
+ __ jmp(&after_writing);
+
+ __ bind(&skip_write_barrier);
__ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
__ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
+
+ __ bind(&after_writing);
+
__ mov(eax, ecx);
__ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
@@ -7355,8 +7398,10 @@
{ REG(edx), REG(eax), REG(edi), EMIT_REMEMBERED_SET},
// StoreArrayLiteralElementStub::Generate
{ REG(ebx), REG(eax), REG(ecx), EMIT_REMEMBERED_SET},
- // FastNewClosureStub
+ // FastNewClosureStub and StringAddStub::Generate
{ REG(ecx), REG(edx), REG(ebx), EMIT_REMEMBERED_SET},
+ // StringAddStub::Generate
+ { REG(ecx), REG(eax), REG(ebx), EMIT_REMEMBERED_SET},
// Null termination.
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
@@ -7877,15 +7922,8 @@
// Get the elements kind and case on that.
__ cmp(ebx, Immediate(undefined_sentinel));
__ j(equal, &no_info);
- __ mov(edx, FieldOperand(ebx, kPointerSize));
-
- // There is no info if the call site went megamorphic either
-
- // TODO(mvstanton): Really? I thought if it was the array function that
- // the cell wouldn't get stamped as megamorphic.
- __ cmp(edx, Immediate(TypeFeedbackCells::MegamorphicSentinel(
- masm->isolate())));
- __ j(equal, &no_info);
+ __ mov(edx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
+ __ JumpIfNotSmi(edx, &no_info);
__ SmiUntag(edx);
__ jmp(&switch_ready);
__ bind(&no_info);
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index f71a76d..5a78019 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -1529,7 +1529,8 @@
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
int properties_count = constant_properties->length() / 2;
- if (expr->depth() > 1) {
+ if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
+ expr->depth() > 1) {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
@@ -1900,11 +1901,12 @@
Label resume;
__ CompareRoot(result_register(), Heap::kTheHoleValueRootIndex);
__ j(not_equal, &resume);
- __ pop(result_register());
if (expr->yield_kind() == Yield::SUSPEND) {
- // TODO(wingo): Box into { value: VALUE, done: false }.
+ EmitReturnIteratorResult(false);
+ } else {
+ __ pop(result_register());
+ EmitReturnSequence();
}
- EmitReturnSequence();
__ bind(&resume);
context()->Plug(result_register());
@@ -1916,18 +1918,7 @@
__ mov(FieldOperand(result_register(),
JSGeneratorObject::kContinuationOffset),
Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
- __ pop(result_register());
- // TODO(wingo): Box into { value: VALUE, done: true }.
-
- // Exit all nested statements.
- NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
- int context_length = 0;
- while (current != NULL) {
- current = current->Exit(&stack_depth, &context_length);
- }
- __ Drop(stack_depth);
- EmitReturnSequence();
+ EmitReturnIteratorResult(true);
break;
}
@@ -2033,6 +2024,54 @@
}
+void FullCodeGenerator::EmitReturnIteratorResult(bool done) {
+ Label gc_required;
+ Label allocated;
+
+ Handle<Map> map(isolate()->native_context()->generator_result_map());
+
+ __ Allocate(map->instance_size(), eax, ecx, edx, &gc_required, TAG_OBJECT);
+
+ __ bind(&allocated);
+ __ mov(ebx, map);
+ __ pop(ecx);
+ __ mov(edx, isolate()->factory()->ToBoolean(done));
+ ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
+ __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
+ isolate()->factory()->empty_fixed_array());
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset),
+ isolate()->factory()->empty_fixed_array());
+ __ mov(FieldOperand(eax, JSGeneratorObject::kResultValuePropertyOffset), ecx);
+ __ mov(FieldOperand(eax, JSGeneratorObject::kResultDonePropertyOffset), edx);
+
+ // Only the value field needs a write barrier, as the other values are in the
+ // root set.
+ __ RecordWriteField(eax, JSGeneratorObject::kResultValuePropertyOffset,
+ ecx, edx, kDontSaveFPRegs);
+
+ if (done) {
+ // Exit all nested statements.
+ NestedStatement* current = nesting_stack_;
+ int stack_depth = 0;
+ int context_length = 0;
+ while (current != NULL) {
+ current = current->Exit(&stack_depth, &context_length);
+ }
+ __ Drop(stack_depth);
+ }
+
+ EmitReturnSequence();
+
+ __ bind(&gc_required);
+ __ Push(Smi::FromInt(map->instance_size()));
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ mov(context_register(),
+ Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ jmp(&allocated);
+}
+
+
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 964db0e..e05031b 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -1530,6 +1530,26 @@
}
+void StoreIC::GenerateSlow(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ __ pop(ebx);
+ __ push(edx);
+ __ push(ecx);
+ __ push(eax);
+ __ push(ebx); // return address
+
+ // Do tail-call to runtime routine.
+ ExternalReference ref(IC_Utility(kStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 4318f31..1d9e942 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -1906,16 +1906,24 @@
void LCodeGen::DoAddI(LAddI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
- ASSERT(left->Equals(instr->result()));
- if (right->IsConstantOperand()) {
- __ add(ToOperand(left), ToInteger32Immediate(right));
+ if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
+ if (right->IsConstantOperand()) {
+ int32_t offset = ToInteger32(LConstantOperand::cast(right));
+ __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
+ } else {
+ Operand address(ToRegister(left), ToRegister(right), times_1, 0);
+ __ lea(ToRegister(instr->result()), address);
+ }
} else {
- __ add(ToRegister(left), ToOperand(right));
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
+ if (right->IsConstantOperand()) {
+ __ add(ToOperand(left), ToInteger32Immediate(right));
+ } else {
+ __ add(ToRegister(left), ToOperand(right));
+ }
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
}
}
@@ -2947,42 +2955,27 @@
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+ int offset = instr->hydrogen()->offset();
Register object = ToRegister(instr->object());
- if (!FLAG_track_double_fields) {
- ASSERT(!instr->hydrogen()->representation().IsDouble());
- }
- Register temp = instr->hydrogen()->representation().IsDouble()
- ? ToRegister(instr->temp()) : ToRegister(instr->result());
- if (instr->hydrogen()->is_in_object()) {
- __ mov(temp, FieldOperand(object, instr->hydrogen()->offset()));
- } else {
- __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
- __ mov(temp, FieldOperand(temp, instr->hydrogen()->offset()));
- }
-
- if (instr->hydrogen()->representation().IsDouble()) {
- Label load_from_heap_number, done;
+ if (FLAG_track_double_fields &&
+ instr->hydrogen()->representation().IsDouble()) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister result = ToDoubleRegister(instr->result());
- __ JumpIfNotSmi(temp, &load_from_heap_number);
- __ SmiUntag(temp);
- __ cvtsi2sd(result, Operand(temp));
- __ jmp(&done);
- __ bind(&load_from_heap_number);
- __ movdbl(result, FieldOperand(temp, HeapNumber::kValueOffset));
+ __ movdbl(result, FieldOperand(object, offset));
} else {
- __ JumpIfNotSmi(temp, &load_from_heap_number);
- __ SmiUntag(temp);
- __ push(temp);
- __ fild_s(Operand(esp, 0));
- __ pop(temp);
- __ jmp(&done);
- __ bind(&load_from_heap_number);
- PushX87DoubleOperand(FieldOperand(temp, HeapNumber::kValueOffset));
+ PushX87DoubleOperand(FieldOperand(object, offset));
CurrentInstructionReturnsX87Result();
}
- __ bind(&done);
+ return;
+ }
+
+ Register result = ToRegister(instr->result());
+ if (instr->hydrogen()->is_in_object()) {
+ __ mov(result, FieldOperand(object, offset));
+ } else {
+ __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
+ __ mov(result, FieldOperand(result, offset));
}
}
@@ -3166,41 +3159,6 @@
}
-void LCodeGen::DoLoadElements(LLoadElements* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->object());
- __ mov(result, FieldOperand(input, JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- Label done, ok, fail;
- __ cmp(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(factory()->fixed_array_map()));
- __ j(equal, &done, Label::kNear);
- __ cmp(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(factory()->fixed_cow_array_map()));
- __ j(equal, &done, Label::kNear);
- Register temp((result.is(eax)) ? ebx : eax);
- __ push(temp);
- __ mov(temp, FieldOperand(result, HeapObject::kMapOffset));
- __ movzx_b(temp, FieldOperand(temp, Map::kBitField2Offset));
- __ and_(temp, Map::kElementsKindMask);
- __ shr(temp, Map::kElementsKindShift);
- __ cmp(temp, GetInitialFastElementsKind());
- __ j(less, &fail, Label::kNear);
- __ cmp(temp, TERMINAL_FAST_ELEMENTS_KIND);
- __ j(less_equal, &ok, Label::kNear);
- __ cmp(temp, FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- __ j(less, &fail, Label::kNear);
- __ cmp(temp, LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- __ j(less_equal, &ok, Label::kNear);
- __ bind(&fail);
- __ Abort("Check for fast or external elements failed.");
- __ bind(&ok);
- __ pop(temp);
- __ bind(&done);
- }
-}
-
-
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register result = ToRegister(instr->result());
@@ -4233,8 +4191,7 @@
__ Set(eax, Immediate(instr->arity()));
__ mov(ebx, instr->hydrogen()->property_cell());
- Object* cell_value = instr->hydrogen()->property_cell()->value();
- ElementsKind kind = static_cast<ElementsKind>(Smi::cast(cell_value)->value());
+ ElementsKind kind = instr->hydrogen()->elements_kind();
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(kind);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
@@ -4267,6 +4224,8 @@
int offset = instr->offset();
+ Handle<Map> transition = instr->transition();
+
if (FLAG_track_fields && representation.IsSmi()) {
if (instr->value()->IsConstantOperand()) {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
@@ -4280,18 +4239,20 @@
DeoptimizeIf(overflow, instr->environment());
}
}
- } else if (FLAG_track_double_fields && representation.IsDouble() &&
- !instr->hydrogen()->value()->type().IsSmi() &&
- !instr->hydrogen()->value()->type().IsHeapNumber()) {
- Register value = ToRegister(instr->value());
- Label do_store;
- __ JumpIfSmi(value, &do_store);
- Handle<Map> map(isolate()->factory()->heap_number_map());
- DoCheckMapCommon(value, map, REQUIRE_EXACT_MAP, instr);
- __ bind(&do_store);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ ASSERT(transition.is_null());
+ ASSERT(instr->is_in_object());
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister value = ToDoubleRegister(instr->value());
+ __ movdbl(FieldOperand(object, offset), value);
+ } else {
+ __ fstp_d(FieldOperand(object, offset));
+ }
+ return;
}
- Handle<Map> transition = instr->transition();
if (!transition.is_null()) {
if (transition->CanBeDeprecated()) {
transition_maps_.Add(transition, info()->zone());
@@ -4337,6 +4298,7 @@
__ mov(FieldOperand(write_register, offset), ToRegister(operand_value));
} else {
Handle<Object> handle_value = ToHandle(operand_value);
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
__ mov(FieldOperand(write_register, offset), handle_value);
}
} else {
@@ -5510,6 +5472,8 @@
} else {
mode = NUMBER_CANDIDATE_IS_SMI;
}
+ } else {
+ mode = NUMBER_CANDIDATE_IS_SMI;
}
}
@@ -6182,7 +6146,8 @@
// Set up the parameters to the stub/runtime call and pick the right
// runtime function or stub to call.
int properties_count = instr->hydrogen()->constant_properties_length() / 2;
- if (instr->hydrogen()->depth() > 1) {
+ if ((FLAG_track_double_fields && instr->hydrogen()->may_store_doubles()) ||
+ instr->hydrogen()->depth() > 1) {
__ PushHeapObject(literals);
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
__ push(Immediate(constant_properties));
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index cffe5b1..6c9098e 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -736,7 +736,7 @@
int vreg = allocator_->GetVirtualRegister();
if (!allocator_->AllocationOk()) {
Abort("Out of virtual registers while trying to allocate temp register.");
- return NULL;
+ vreg = 0;
}
operand->set_virtual_register(vreg);
return operand;
@@ -834,8 +834,8 @@
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
return DefineSameAsFirst(result);
}
@@ -1392,8 +1392,8 @@
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineSameAsFirst(new(zone()) LBitI(left, right));
} else {
ASSERT(instr->representation().IsTagged());
@@ -1560,8 +1560,8 @@
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstant(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstant(instr->BetterRightOperand());
LOperand* temp = NULL;
if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
temp = TempRegister();
@@ -1604,13 +1604,24 @@
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
if (instr->representation().IsInteger32()) {
+ // Check to see if it would be advantageous to use an lea instruction rather
+ // than an add. This is the case when no overflow check is needed and there
+ // are multiple uses of the add's inputs, so using a 3-register add will
+ // preserve all input values for later uses.
+ bool use_lea = LAddI::UseLea(instr);
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ HValue* right_candidate = instr->BetterRightOperand();
+ LOperand* right = use_lea
+ ? UseRegisterOrConstantAtStart(right_candidate)
+ : UseOrConstantAtStart(right_candidate);
LAddI* add = new(zone()) LAddI(left, right);
- LInstruction* result = DefineSameAsFirst(add);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
+ bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+ LInstruction* result = use_lea
+ ? DefineAsRegister(add)
+ : DefineSameAsFirst(add);
+ if (can_overflow) {
result = AssignEnvironment(result);
}
return result;
@@ -1629,8 +1640,8 @@
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- left = UseRegisterAtStart(instr->LeastConstantOperand());
- right = UseOrConstantAtStart(instr->MostConstantOperand());
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
+ right = UseOrConstantAtStart(instr->BetterRightOperand());
} else {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
@@ -2172,9 +2183,7 @@
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
LOperand* obj = UseRegisterAtStart(instr->object());
- LOperand* temp = instr->representation().IsDouble() ? TempRegister() : NULL;
- ASSERT(temp == NULL || FLAG_track_double_fields);
- return DefineAsRegister(new(zone()) LLoadNamedField(obj, temp));
+ return DefineAsRegister(new(zone()) LLoadNamedField(obj));
}
@@ -2213,12 +2222,6 @@
}
-LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadElements(input));
-}
-
-
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
@@ -2433,6 +2436,13 @@
val = UseRegisterOrConstant(instr->value());
} else if (FLAG_track_fields && instr->field_representation().IsSmi()) {
val = UseTempRegister(instr->value());
+ } else if (FLAG_track_double_fields &&
+ instr->field_representation().IsDouble()) {
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ val = UseRegisterAtStart(instr->value());
+ } else {
+ val = UseX87TopOfStack(instr->value());
+ }
} else {
val = UseRegister(instr->value());
}
@@ -2440,15 +2450,14 @@
// We only need a scratch register if we have a write barrier or we
// have a store into the properties array (not in-object-property).
LOperand* temp = (!instr->is_in_object() || needs_write_barrier ||
- needs_write_barrier_for_map) ? TempRegister() : NULL;
+ needs_write_barrier_for_map) ? TempRegister() : NULL;
// We need a temporary register for write barrier of the map field.
LOperand* temp_map = needs_write_barrier_for_map ? TempRegister() : NULL;
LStoreNamedField* result =
new(zone()) LStoreNamedField(obj, val, temp, temp_map);
- if ((FLAG_track_fields && instr->field_representation().IsSmi()) ||
- (FLAG_track_double_fields && instr->field_representation().IsDouble())) {
+ if (FLAG_track_fields && instr->field_representation().IsSmi()) {
return AssignEnvironment(result);
}
return result;
@@ -2584,7 +2593,7 @@
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedIndex) {
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
Abort("Too many spill slots needed for OSR");
spill_index = 0;
}
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 490b780..8202418 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -117,7 +117,6 @@
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
- V(LoadElements) \
V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
@@ -1370,6 +1369,11 @@
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
+ static bool UseLea(HAdd* add) {
+ return !add->CheckFlag(HValue::kCanOverflow) &&
+ add->BetterLeftOperand()->UseCount() > 1;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
DECLARE_HYDROGEN_ACCESSOR(Add)
};
@@ -1490,15 +1494,18 @@
};
-class LLoadNamedField: public LTemplateInstruction<1, 1, 1> {
+class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LLoadNamedField(LOperand* object, LOperand* temp) {
+ explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
- temps_[0] = temp;
+ }
+
+ virtual bool ClobbersDoubleRegisters() const {
+ return !CpuFeatures::IsSupported(SSE2) &&
+ !hydrogen()->representation().IsDouble();
}
LOperand* object() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
@@ -1552,18 +1559,6 @@
};
-class LLoadElements: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadElements(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
-};
-
-
class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadExternalArrayPointer(LOperand* object) {
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index da29ce7..175b1ca 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1603,10 +1603,32 @@
Register scratch1,
Register scratch2,
Label* gc_required) {
- // Allocate heap number in new space.
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ Label allocate_new_space, install_map;
+ AllocationFlags flags = TAG_OBJECT;
+ ExternalReference high_promotion_mode = ExternalReference::
+ new_space_high_promotion_mode_active_address(isolate());
+
+ test(Operand::StaticVariable(high_promotion_mode), Immediate(1));
+ j(zero, &allocate_new_space);
+
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
+ jmp(&install_map);
+
+ bind(&allocate_new_space);
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ flags);
+
+ bind(&install_map);
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
Immediate(isolate()->factory()->cons_ascii_string_map()));
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 5e0ee44..9623b9a 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -369,11 +369,13 @@
}
-void StubCompiler::DoGenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- bool inobject,
- int index) {
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+ Register dst,
+ Register src,
+ bool inobject,
+ int index,
+ Representation representation) {
+ ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
@@ -763,8 +765,10 @@
Register value_reg,
Register scratch1,
Register scratch2,
+ Register unused,
Label* miss_label,
- Label* miss_restore_name) {
+ Label* miss_restore_name,
+ Label* slow) {
// Check that the map of the object hasn't changed.
__ CheckMap(receiver_reg, Handle<Map>(object->map()),
miss_label, DO_SMI_CHECK, REQUIRE_EXACT_MAP);
@@ -783,16 +787,6 @@
// Ensure no transitions to deprecated maps are followed.
__ CheckMapDeprecated(transition, scratch1, miss_label);
- if (FLAG_track_fields && representation.IsSmi()) {
- __ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
- Label do_store;
- __ JumpIfSmi(value_reg, &do_store);
- __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
- miss_label, DONT_DO_SMI_CHECK, REQUIRE_EXACT_MAP);
- __ bind(&do_store);
- }
-
// Check that we are allowed to write this.
if (object->GetPrototype()->IsJSObject()) {
JSObject* holder;
@@ -809,7 +803,7 @@
// We need an extra register, push
Register holder_reg = CheckPrototypes(
object, receiver_reg, Handle<JSObject>(holder), name_reg,
- scratch1, scratch2, name, miss_restore_name);
+ scratch1, scratch2, name, miss_restore_name, SKIP_RECEIVER);
// If no property was found, and the holder (the last object in the
// prototype chain) is in slow mode, we need to do a negative lookup on the
// holder.
@@ -828,6 +822,46 @@
}
}
+ Register storage_reg = name_reg;
+
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_restore_name);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ Label do_store, heap_number;
+ __ AllocateHeapNumber(storage_reg, scratch1, scratch2, slow);
+
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiUntag(value_reg);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope use_sse2(masm, SSE2);
+ __ cvtsi2sd(xmm0, value_reg);
+ } else {
+ __ push(value_reg);
+ __ fild_s(Operand(esp, 0));
+ __ pop(value_reg);
+ }
+ __ SmiTag(value_reg);
+ __ jmp(&do_store);
+
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
+ miss_restore_name, DONT_DO_SMI_CHECK, REQUIRE_EXACT_MAP);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope use_sse2(masm, SSE2);
+ __ movdbl(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+ } else {
+ __ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
+ }
+
+ __ bind(&do_store);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope use_sse2(masm, SSE2);
+ __ movdbl(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ fstp_d(FieldOperand(storage_reg, HeapNumber::kValueOffset));
+ }
+ }
+
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
@@ -839,7 +873,7 @@
__ pop(scratch1); // Return address.
__ push(receiver_reg);
__ push(Immediate(transition));
- __ push(eax);
+ __ push(value_reg);
__ push(scratch1);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
@@ -853,12 +887,11 @@
__ mov(scratch1, Immediate(transition));
__ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
- // Update the write barrier for the map field and pass the now unused
- // name_reg as scratch register.
+ // Update the write barrier for the map field.
__ RecordWriteField(receiver_reg,
HeapObject::kMapOffset,
scratch1,
- name_reg,
+ scratch2,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
@@ -875,12 +908,20 @@
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- __ mov(FieldOperand(receiver_reg, offset), value_reg);
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ __ mov(FieldOperand(receiver_reg, offset), storage_reg);
+ } else {
+ __ mov(FieldOperand(receiver_reg, offset), value_reg);
+ }
if (!FLAG_track_fields || !representation.IsSmi()) {
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, value_reg);
+ if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ __ mov(name_reg, value_reg);
+ } else {
+ ASSERT(storage_reg.is(name_reg));
+ }
__ RecordWriteField(receiver_reg,
offset,
name_reg,
@@ -892,12 +933,20 @@
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
__ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ mov(FieldOperand(scratch1, offset), eax);
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ __ mov(FieldOperand(scratch1, offset), storage_reg);
+ } else {
+ __ mov(FieldOperand(scratch1, offset), value_reg);
+ }
if (!FLAG_track_fields || !representation.IsSmi()) {
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, value_reg);
+ if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ __ mov(name_reg, value_reg);
+ } else {
+ ASSERT(storage_reg.is(name_reg));
+ }
__ RecordWriteField(scratch1,
offset,
name_reg,
@@ -948,13 +997,53 @@
if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
- Label do_store;
- __ JumpIfSmi(value_reg, &do_store);
+ // Load the double storage.
+ if (index < 0) {
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ mov(scratch1, FieldOperand(receiver_reg, offset));
+ } else {
+ __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ __ mov(scratch1, FieldOperand(scratch1, offset));
+ }
+
+ // Store the value into the storage.
+ Label do_store, heap_number;
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiUntag(value_reg);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope use_sse2(masm, SSE2);
+ __ cvtsi2sd(xmm0, value_reg);
+ } else {
+ __ push(value_reg);
+ __ fild_s(Operand(esp, 0));
+ __ pop(value_reg);
+ }
+ __ SmiTag(value_reg);
+ __ jmp(&do_store);
+ __ bind(&heap_number);
__ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
miss_label, DONT_DO_SMI_CHECK, REQUIRE_EXACT_MAP);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope use_sse2(masm, SSE2);
+ __ movdbl(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+ } else {
+ __ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
+ }
__ bind(&do_store);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope use_sse2(masm, SSE2);
+ __ movdbl(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ fstp_d(FieldOperand(scratch1, HeapNumber::kValueOffset));
+ }
+ // Return the value (register eax).
+ ASSERT(value_reg.is(eax));
+ __ ret(0);
+ return;
}
+ ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
// TODO(verwaest): Share this code as a code stub.
if (index < 0) {
// Set the property straight into the object.
@@ -976,7 +1065,7 @@
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
__ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ mov(FieldOperand(scratch1, offset), eax);
+ __ mov(FieldOperand(scratch1, offset), value_reg);
if (!FLAG_track_fields || !representation.IsSmi()) {
// Update the write barrier for the array address.
@@ -1236,10 +1325,20 @@
void BaseLoadStubCompiler::GenerateLoadField(Register reg,
Handle<JSObject> holder,
- PropertyIndex index) {
- // Get the value from the properties.
- GenerateFastPropertyLoad(masm(), eax, reg, holder, index);
- __ ret(0);
+ PropertyIndex field,
+ Representation representation) {
+ if (!reg.is(receiver())) __ mov(receiver(), reg);
+ if (kind() == Code::LOAD_IC) {
+ LoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ } else {
+ KeyedLoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ }
}
@@ -1494,7 +1593,9 @@
Register reg = CheckPrototypes(object, edx, holder, ebx, eax, edi,
name, &miss);
- GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
+ GenerateFastPropertyLoad(
+ masm(), edi, reg, index.is_inobject(holder),
+ index.translate(holder), Representation::Tagged());
// Check that the function really is a function.
__ JumpIfSmi(edi, &miss);
diff --git a/src/ic.cc b/src/ic.cc
index 080c7bf..31845f2 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -182,13 +182,13 @@
static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
Object* receiver,
Object* name) {
- // If the code is NORMAL, it handles dictionary mode objects. Such stubs do
- // not check maps, but do positive/negative lookups.
- if (target->type() != Code::NORMAL) {
- Map* map = target->FindFirstMap();
- if (map != NULL && map->is_deprecated()) {
- return true;
- }
+ if (target->is_keyed_load_stub() ||
+ target->is_keyed_call_stub() ||
+ target->is_keyed_store_stub()) {
+ // Determine whether the failure is due to a name failure.
+ if (!name->IsName()) return false;
+ Name* stub_name = target->FindFirstName();
+ if (Name::cast(name) != stub_name) return false;
}
InlineCacheHolderFlag cache_holder =
@@ -217,10 +217,30 @@
int index = map->IndexInCodeCache(name, target);
if (index >= 0) {
map->RemoveFromCodeCache(String::cast(name), target, index);
+ // For loads, handlers are stored in addition to the ICs on the map. Remove
+ // those, too.
+ if (target->is_load_stub() || target->is_keyed_load_stub()) {
+ Code* handler = target->FindFirstCode();
+ index = map->IndexInCodeCache(name, handler);
+ if (index >= 0) {
+ map->RemoveFromCodeCache(String::cast(name), handler, index);
+ }
+ }
return true;
}
- return false;
+ // If the IC is shared between multiple receivers (slow dictionary mode), then
+ // the map cannot be deprecated and the stub invalidated.
+ if (cache_holder != OWN_MAP) return false;
+
+ // The stub is not in the cache. We've ruled out all other kinds of failure
+ // except for prototype chain changes, a deprecated map, or a map that's
+ // different from the one that the stub expects. If the map hasn't changed,
+ // assume it's a prototype failure. Treat deprecated maps in the same way as
+ // prototype failures (stay monomorphic if possible).
+ Map* old_map = target->FindFirstMap();
+ if (old_map == NULL) return false;
+ return old_map == map || old_map->is_deprecated();
}
@@ -230,22 +250,13 @@
if (state != MONOMORPHIC || !name->IsString()) return state;
if (receiver->IsUndefined() || receiver->IsNull()) return state;
- // For keyed load/store/call, the most likely cause of cache failure is
- // that the key has changed. We do not distinguish between
- // prototype and non-prototype failures for keyed access.
Code::Kind kind = target->kind();
- if (kind == Code::KEYED_LOAD_IC ||
- kind == Code::KEYED_STORE_IC ||
- kind == Code::KEYED_CALL_IC) {
- return MONOMORPHIC;
- }
-
// Remove the target from the code cache if it became invalid
// because of changes in the prototype chain to avoid hitting it
// again.
// Call stubs handle this later to allow extra IC state
// transitions.
- if (kind != Code::CALL_IC &&
+ if (kind != Code::CALL_IC && kind != Code::KEYED_CALL_IC &&
TryRemoveInvalidPrototypeDependentStub(target, receiver, name)) {
return MONOMORPHIC_PROTOTYPE_FAILURE;
}
@@ -724,8 +735,7 @@
TryUpdateExtraICState(lookup, object, &extra_ic_state)) {
code = ComputeMonomorphicStub(lookup, state, extra_ic_state,
object, name);
- } else if (kind_ == Code::CALL_IC &&
- TryRemoveInvalidPrototypeDependentStub(target(),
+ } else if (TryRemoveInvalidPrototypeDependentStub(target(),
*object,
*name)) {
state = MONOMORPHIC_PROTOTYPE_FAILURE;
@@ -748,15 +758,7 @@
case UNINITIALIZED:
case MONOMORPHIC_PROTOTYPE_FAILURE:
case PREMONOMORPHIC:
- set_target(*code);
- break;
case MONOMORPHIC:
- if (code->ic_state() != MONOMORPHIC) {
- Map* map = target()->FindFirstMap();
- if (map != NULL) {
- UpdateMegamorphicCache(map, *name, target());
- }
- }
set_target(*code);
break;
case MEGAMORPHIC: {
@@ -986,14 +988,25 @@
CodeHandleList handlers;
int number_of_valid_maps;
+ int handler_to_overwrite = -1;
+ Handle<Map> new_receiver_map(receiver->map());
{
AssertNoAllocation no_gc;
target()->FindAllMaps(&receiver_maps);
int number_of_maps = receiver_maps.length();
number_of_valid_maps = number_of_maps;
+
for (int i = 0; i < number_of_maps; i++) {
- if (receiver_maps.at(i)->is_deprecated()) {
+ Handle<Map> map = receiver_maps.at(i);
+ // Filter out deprecated maps to ensure its instances get migrated.
+ if (map->is_deprecated()) {
number_of_valid_maps--;
+ // If the receiver map is already in the polymorphic IC, this indicates
+ // there was a prototype chain failure. In that case, just overwrite the
+ // handler.
+ } else if (map.is_identical_to(new_receiver_map)) {
+ number_of_valid_maps--;
+ handler_to_overwrite = i;
}
}
@@ -1007,14 +1020,16 @@
target()->FindAllCode(&handlers, receiver_maps.length());
}
- if (!AddOneReceiverMapIfMissing(&receiver_maps,
- Handle<Map>(receiver->map()))) {
- return false;
+ number_of_valid_maps++;
+ if (handler_to_overwrite >= 0) {
+ handlers.Set(handler_to_overwrite, code);
+ } else {
+ receiver_maps.Add(new_receiver_map);
+ handlers.Add(code);
}
- handlers.Add(code);
Handle<Code> ic = isolate()->stub_cache()->ComputePolymorphicIC(
- &receiver_maps, &handlers, number_of_valid_maps + 1, name);
+ &receiver_maps, &handlers, number_of_valid_maps, name);
set_target(*ic);
return true;
}
@@ -1101,38 +1116,9 @@
if (UpdatePolymorphicIC(state, strict_mode, receiver, name, code)) {
break;
}
- }
- if (target()->type() != Code::NORMAL) {
- if (target()->is_load_stub()) {
+
+ if (target()->type() != Code::NORMAL) {
CopyICToMegamorphicCache(name);
- } else if (target()->is_store_stub()) {
- // Ensure that the IC stays monomorphic when replacing a monomorphic
- // IC for a deprecated map.
- // TODO(verwaest): Remove this code once polymorphic store ICs are
- // implemented. Updating the polymorphic IC will keep it monomorphic
- // by filtering deprecated maps.
- MapHandleList maps;
- Code* handler = target();
- handler->FindAllMaps(&maps);
- for (int i = 0; i < Min(1, maps.length()); i++) {
- if (maps.at(i)->is_deprecated()) {
- UpdateMonomorphicIC(receiver, code, name);
- return;
- }
- }
- if (maps.length() > 0) {
- if (receiver->map() == *maps.at(0)) {
- UpdateMonomorphicIC(receiver, code, name);
- return;
- }
- UpdateMegamorphicCache(*maps.at(0), *name, handler);
- }
- } else {
- Code* handler = target();
- Map* map = handler->FindFirstMap();
- if (map != NULL) {
- UpdateMegamorphicCache(map, *name, handler);
- }
}
}
@@ -1235,7 +1221,7 @@
}
-void IC::UpdateMegamorphicCache(Map* map, String* name, Code* code) {
+void IC::UpdateMegamorphicCache(Map* map, Name* name, Code* code) {
// Cache code holding map should be consistent with
// GenerateMonomorphicCacheProbe.
isolate()->stub_cache()->Set(name, map, code);
@@ -1255,7 +1241,8 @@
switch (lookup->type()) {
case FIELD:
return isolate()->stub_cache()->ComputeLoadField(
- name, receiver, holder, lookup->GetFieldIndex());
+ name, receiver, holder,
+ lookup->GetFieldIndex(), lookup->representation());
case CONSTANT_FUNCTION: {
Handle<JSFunction> constant(lookup->GetConstantFunction());
return isolate()->stub_cache()->ComputeLoadConstant(
@@ -1297,7 +1284,7 @@
PropertyIndex lengthIndex =
PropertyIndex::NewHeaderIndex(JSArray::kLengthOffset / kPointerSize);
return isolate()->stub_cache()->ComputeLoadField(
- name, receiver, holder, lengthIndex);
+ name, receiver, holder, lengthIndex, Representation::Tagged());
}
// TODO(dcarney): Handle correctly.
if (callback->IsDeclaredAccessorInfo()) break;
@@ -1464,7 +1451,8 @@
switch (lookup->type()) {
case FIELD:
return isolate()->stub_cache()->ComputeKeyedLoadField(
- name, receiver, holder, lookup->GetFieldIndex());
+ name, receiver, holder,
+ lookup->GetFieldIndex(), lookup->representation());
case CONSTANT_FUNCTION: {
Handle<JSFunction> constant(lookup->GetConstantFunction(), isolate());
return isolate()->stub_cache()->ComputeKeyedLoadConstant(
@@ -1497,7 +1485,8 @@
static bool LookupForWrite(Handle<JSObject> receiver,
Handle<String> name,
Handle<Object> value,
- LookupResult* lookup) {
+ LookupResult* lookup,
+ IC::State* state) {
Handle<JSObject> holder = receiver;
receiver->Lookup(*name, lookup);
if (lookup->IsFound()) {
@@ -1534,7 +1523,21 @@
PropertyDetails target_details =
lookup->GetTransitionDetails(receiver->map());
if (target_details.IsReadOnly()) return false;
- return value->FitsRepresentation(target_details.representation());
+
+ // If the value that's being stored does not fit in the field that the
+ // instance would transition to, create a new transition that fits the value.
+ // This has to be done before generating the IC, since that IC will embed the
+ // transition target.
+ // Ensure the instance and its map were migrated before trying to update the
+ // transition target.
+ ASSERT(!receiver->map()->is_deprecated());
+ if (!value->FitsRepresentation(target_details.representation())) {
+ Handle<Map> target(lookup->GetTransitionMapFromMap(receiver->map()));
+ Map::GeneralizeRepresentation(
+ target, target->LastAdded(), value->OptimalRepresentation());
+ *state = MONOMORPHIC_PROTOTYPE_FAILURE;
+ }
+ return true;
}
@@ -1618,7 +1621,7 @@
}
LookupResult lookup(isolate());
- if (LookupForWrite(receiver, name, value, &lookup)) {
+ if (LookupForWrite(receiver, name, value, &lookup, &state)) {
if (FLAG_use_ic) {
UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
}
@@ -2259,11 +2262,24 @@
int new_unused = transition->unused_property_fields();
int new_size = old_storage->length() + new_unused + 1;
Object* result;
- { MaybeObject* maybe_result = old_storage->CopySize(new_size);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ MaybeObject* maybe_result = old_storage->CopySize(new_size);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+
FixedArray* new_storage = FixedArray::cast(result);
- new_storage->set(old_storage->length(), value);
+
+ Object* to_store = value;
+
+ if (FLAG_track_double_fields) {
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(transition->LastAdded());
+ if (details.representation().IsDouble()) {
+ MaybeObject* maybe_storage =
+ isolate->heap()->AllocateHeapNumber(value->Number());
+ if (!maybe_storage->To(&to_store)) return maybe_storage;
+ }
+ }
+
+ new_storage->set(old_storage->length(), to_store);
// Set the new property value and do the map transition.
object->set_properties(new_storage);
@@ -2305,6 +2321,24 @@
}
+RUNTIME_FUNCTION(MaybeObject*, StoreIC_Slow) {
+ NoHandleAllocation na(isolate);
+ ASSERT(args.length() == 3);
+ StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+ Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
+ Handle<Object> object = args.at<Object>(0);
+ Handle<Object> key = args.at<Object>(1);
+ Handle<Object> value = args.at<Object>(2);
+ StrictModeFlag strict_mode = Code::GetStrictMode(extra_ic_state);
+ return Runtime::SetObjectProperty(isolate,
+ object,
+ key,
+ value,
+ NONE,
+ strict_mode);
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Slow) {
NoHandleAllocation na(isolate);
ASSERT(args.length() == 3);
diff --git a/src/ic.h b/src/ic.h
index 4bf259a..739f34c 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -45,6 +45,7 @@
ICU(KeyedCallIC_Miss) \
ICU(StoreIC_Miss) \
ICU(StoreIC_ArrayLength) \
+ ICU(StoreIC_Slow) \
ICU(SharedStoreIC_ExtendStorage) \
ICU(KeyedStoreIC_Miss) \
ICU(KeyedStoreIC_MissForceGeneric) \
@@ -184,7 +185,7 @@
Handle<JSObject> receiver,
Handle<String> name,
Handle<Code> code);
- virtual void UpdateMegamorphicCache(Map* map, String* name, Code* code);
+ virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code);
virtual Handle<Code> megamorphic_stub() {
UNREACHABLE();
return Handle<Code>::null();
@@ -471,7 +472,7 @@
virtual Handle<Code> ComputeLoadHandler(LookupResult* lookup,
Handle<JSObject> receiver,
Handle<String> name);
- virtual void UpdateMegamorphicCache(Map* map, String* name, Code* code) { }
+ virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code) { }
private:
// Stub accessors.
@@ -504,6 +505,7 @@
}
// Code generators for stub routines. Only called once at startup.
+ static void GenerateSlow(MacroAssembler* masm);
static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
static void GenerateMiss(MacroAssembler* masm);
static void GenerateMegamorphic(MacroAssembler* masm,
@@ -620,7 +622,7 @@
StrictModeFlag strict_mode,
Handle<JSObject> receiver,
Handle<String> name);
- virtual void UpdateMegamorphicCache(Map* map, String* name, Code* code) { }
+ virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code) { }
virtual Handle<Code> megamorphic_stub() {
return isolate()->builtins()->KeyedStoreIC_Generic();
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
index 658a34c..bacbb93 100644
--- a/src/incremental-marking.cc
+++ b/src/incremental-marking.cc
@@ -490,10 +490,16 @@
// debug tests run with incremental marking and some without.
static const intptr_t kActivationThreshold = 0;
#endif
-
+ // Only start incremental marking in a safe state: 1) when expose GC is
+ // deactivated, 2) when incremental marking is turned on, 3) when we are
+ // currently not in a GC, and 4) when we are currently not serializing
+ // or deserializing the heap.
return !FLAG_expose_gc &&
FLAG_incremental_marking &&
+ FLAG_incremental_marking_steps &&
+ heap_->gc_state() == Heap::NOT_IN_GC &&
!Serializer::enabled() &&
+ heap_->isolate()->IsInitialized() &&
heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}
@@ -561,17 +567,21 @@
}
-void IncrementalMarking::Start() {
+void IncrementalMarking::Start(CompactionFlag flag) {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Start\n");
}
ASSERT(FLAG_incremental_marking);
+ ASSERT(FLAG_incremental_marking_steps);
ASSERT(state_ == STOPPED);
+ ASSERT(heap_->gc_state() == Heap::NOT_IN_GC);
+ ASSERT(!Serializer::enabled());
+ ASSERT(heap_->isolate()->IsInitialized());
ResetStepCounters();
if (heap_->IsSweepingComplete()) {
- StartMarking(ALLOW_COMPACTION);
+ StartMarking(flag);
} else {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Start sweeping.\n");
@@ -860,6 +870,17 @@
}
+void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
+ if (IsStopped() && WorthActivating() && heap_->NextGCIsLikelyToBeFull()) {
+ // TODO(hpayer): Let's play safe for now, but compaction should be
+ // possible in principle.
+ Start(PREVENT_COMPACTION);
+ } else {
+ Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
+ }
+}
+
+
void IncrementalMarking::Step(intptr_t allocated_bytes,
CompletionAction action) {
if (heap_->gc_state() != Heap::NOT_IN_GC ||
@@ -965,7 +986,7 @@
PrintPID("Postponing speeding up marking until marking starts\n");
}
} else {
- marking_speed_ += kMarkingSpeedAccellerationInterval;
+ marking_speed_ += kMarkingSpeedAccelleration;
marking_speed_ = static_cast<int>(
Min(kMaxMarkingSpeed,
static_cast<intptr_t>(marking_speed_ * 1.3)));
diff --git a/src/incremental-marking.h b/src/incremental-marking.h
index fc5a978..47d5a51 100644
--- a/src/incremental-marking.h
+++ b/src/incremental-marking.h
@@ -75,7 +75,9 @@
bool WorthActivating();
- void Start();
+ enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };
+
+ void Start(CompactionFlag flag = ALLOW_COMPACTION);
void Stop();
@@ -110,10 +112,7 @@
static const intptr_t kMarkingSpeedAccelleration = 2;
static const intptr_t kMaxMarkingSpeed = 1000;
- void OldSpaceStep(intptr_t allocated) {
- Step(allocated * kFastMarking / kInitialMarkingSpeed,
- GC_VIA_STACK_GUARD);
- }
+ void OldSpaceStep(intptr_t allocated);
void Step(intptr_t allocated, CompletionAction action);
@@ -226,8 +225,6 @@
void ResetStepCounters();
- enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };
-
void StartMarking(CompactionFlag flag);
void ActivateIncrementalWriteBarrier(PagedSpace* space);
diff --git a/src/json-parser.h b/src/json-parser.h
index 78c1a7a..ddc3b73 100644
--- a/src/json-parser.h
+++ b/src/json-parser.h
@@ -381,39 +381,23 @@
// First check whether there is a single expected transition. If so, try
// to parse it first.
bool follow_expected = false;
+ Handle<Map> target;
if (seq_ascii) {
key = JSObject::ExpectedTransitionKey(map);
follow_expected = !key.is_null() && ParseJsonString(key);
}
// If the expected transition hits, follow it.
if (follow_expected) {
- map = JSObject::ExpectedTransitionTarget(map);
+ target = JSObject::ExpectedTransitionTarget(map);
} else {
// If the expected transition failed, parse an internalized string and
// try to find a matching transition.
key = ParseJsonInternalizedString();
if (key.is_null()) return ReportUnexpectedCharacter();
- Handle<Map> target = JSObject::FindTransitionToField(map, key);
+ target = JSObject::FindTransitionToField(map, key);
// If a transition was found, follow it and continue.
- if (!target.is_null()) {
- map = target;
- } else {
- // If no transition was found, commit the intermediate state to the
- // object and stop transitioning.
- JSObject::TransitionToMap(json_object, map);
- int length = properties.length();
- for (int i = 0; i < length; i++) {
- Handle<Object> value = properties[i];
- Representation representation =
- map->instance_descriptors()->GetDetails(i).representation();
- if (representation.IsDouble() && value->IsSmi()) {
- // TODO(verwaest): Allocate heap number.
- }
- json_object->FastPropertyAtPut(i, *value);
- }
- transitioning = false;
- }
+ transitioning = !target.is_null();
}
if (c0_ != ':') return ReportUnexpectedCharacter();
@@ -421,16 +405,35 @@
value = ParseJsonValue();
if (value.is_null()) return ReportUnexpectedCharacter();
- properties.Add(value, zone());
if (transitioning) {
- int field = properties.length() - 1;
- Representation expected_representation =
- map->instance_descriptors()->GetDetails(field).representation();
- if (!value->FitsRepresentation(expected_representation)) {
- map = Map::GeneralizeRepresentation(
- map, field, value->OptimalRepresentation());
+ int descriptor = map->NumberOfOwnDescriptors();
+ PropertyDetails details =
+ target->instance_descriptors()->GetDetails(descriptor);
+ Representation expected_representation = details.representation();
+
+ if (value->FitsRepresentation(expected_representation)) {
+ // If the target representation is double and the value is already
+ // double, use the existing box.
+ if (FLAG_track_double_fields &&
+ value->IsSmi() &&
+ expected_representation.IsDouble()) {
+ value = factory()->NewHeapNumber(
+ Handle<Smi>::cast(value)->value());
+ }
+ properties.Add(value, zone());
+ map = target;
+ continue;
+ } else {
+ transitioning = false;
}
- continue;
+ }
+
+ // Commit the intermediate state to the object and stop transitioning.
+ JSObject::AllocateStorageForMap(json_object, map);
+ int length = properties.length();
+ for (int i = 0; i < length; i++) {
+ Handle<Object> value = properties[i];
+ json_object->FastPropertyAtPut(i, *value);
}
} else {
key = ParseJsonInternalizedString();
@@ -450,14 +453,19 @@
// If we transitioned until the very end, transition the map now.
if (transitioning) {
- JSObject::TransitionToMap(json_object, map);
+ JSObject::AllocateStorageForMap(json_object, map);
int length = properties.length();
for (int i = 0; i < length; i++) {
Handle<Object> value = properties[i];
- Representation representation =
- map->instance_descriptors()->GetDetails(i).representation();
- if (representation.IsDouble() && value->IsSmi()) {
- // TODO(verwaest): Allocate heap number.
+ // If the target representation is double and the value is already
+ // double, use the existing box.
+ if (FLAG_track_double_fields && value->IsSmi()) {
+ Representation representation =
+ map->instance_descriptors()->GetDetails(i).representation();
+ if (representation.IsDouble()) {
+ value = factory()->NewHeapNumber(
+ Handle<Smi>::cast(value)->value());
+ }
}
json_object->FastPropertyAtPut(i, *value);
}
diff --git a/src/json-stringifier.h b/src/json-stringifier.h
index 47a0129..b67a9f6 100644
--- a/src/json-stringifier.h
+++ b/src/json-stringifier.h
@@ -644,7 +644,7 @@
Handle<Object> property;
if (details.type() == FIELD && *map == object->map()) {
property = Handle<Object>(
- object->FastPropertyAt(
+ object->RawFastPropertyAt(
map->instance_descriptors()->GetFieldIndex(i)),
isolate_);
} else {
diff --git a/src/list-inl.h b/src/list-inl.h
index d815a7e..143c830 100644
--- a/src/list-inl.h
+++ b/src/list-inl.h
@@ -104,6 +104,13 @@
template<typename T, class P>
+void List<T, P>::Set(int index, const T& elm) {
+ ASSERT(index >= 0 && index <= length_);
+ data_[index] = elm;
+}
+
+
+template<typename T, class P>
void List<T, P>::InsertAt(int index, const T& elm, P alloc) {
ASSERT(index >= 0 && index <= length_);
Add(elm, alloc);
diff --git a/src/list.h b/src/list.h
index 43d982f..0e4e35b 100644
--- a/src/list.h
+++ b/src/list.h
@@ -115,6 +115,9 @@
void InsertAt(int index, const T& element,
AllocationPolicy allocator = AllocationPolicy());
+ // Overwrites the element at the specific index.
+ void Set(int index, const T& element);
+
// Added 'count' elements with the value 'value' and returns a
// vector that allows access to the elements. The vector is valid
// until the next change is made to this list.
diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc
index a010f4e..74132b3 100644
--- a/src/lithium-allocator.cc
+++ b/src/lithium-allocator.cc
@@ -140,6 +140,7 @@
next_(NULL),
current_interval_(NULL),
last_processed_use_(NULL),
+ current_hint_operand_(NULL),
spill_operand_(new(zone) LOperand()),
spill_start_index_(kMaxInt) { }
@@ -229,13 +230,6 @@
}
-UsePosition* LiveRange::FirstPosWithHint() const {
- UsePosition* pos = first_pos_;
- while (pos != NULL && !pos->HasHint()) pos = pos->next();
- return pos;
-}
-
-
LOperand* LiveRange::CreateAssignedOperand(Zone* zone) {
LOperand* op = NULL;
if (HasRegisterAssigned()) {
@@ -377,7 +371,7 @@
LifetimePosition start = Start();
LifetimePosition other_start = other->Start();
if (start.Value() == other_start.Value()) {
- UsePosition* pos = FirstPosWithHint();
+ UsePosition* pos = first_pos();
if (pos == NULL) return false;
UsePosition* other_pos = other->first_pos();
if (other_pos == NULL) return true;
@@ -459,9 +453,11 @@
id_,
pos.Value());
UsePosition* use_pos = new(zone) UsePosition(pos, operand, hint);
+ UsePosition* prev_hint = NULL;
UsePosition* prev = NULL;
UsePosition* current = first_pos_;
while (current != NULL && current->pos().Value() < pos.Value()) {
+ prev_hint = current->HasHint() ? current : prev_hint;
prev = current;
current = current->next();
}
@@ -473,6 +469,10 @@
use_pos->next_ = prev->next_;
prev->next_ = use_pos;
}
+
+ if (prev_hint == NULL && use_pos->HasHint()) {
+ current_hint_operand_ = hint;
+ }
}
@@ -625,13 +625,13 @@
bool is_tagged) {
TraceAlloc("Allocating fixed reg for op %d\n", operand->virtual_register());
ASSERT(operand->HasFixedPolicy());
- if (operand->policy() == LUnallocated::FIXED_SLOT) {
- operand->ConvertTo(LOperand::STACK_SLOT, operand->fixed_index());
- } else if (operand->policy() == LUnallocated::FIXED_REGISTER) {
- int reg_index = operand->fixed_index();
+ if (operand->HasFixedSlotPolicy()) {
+ operand->ConvertTo(LOperand::STACK_SLOT, operand->fixed_slot_index());
+ } else if (operand->HasFixedRegisterPolicy()) {
+ int reg_index = operand->fixed_register_index();
operand->ConvertTo(LOperand::REGISTER, reg_index);
- } else if (operand->policy() == LUnallocated::FIXED_DOUBLE_REGISTER) {
- int reg_index = operand->fixed_index();
+ } else if (operand->HasFixedDoubleRegisterPolicy()) {
+ int reg_index = operand->fixed_register_index();
operand->ConvertTo(LOperand::DOUBLE_REGISTER, reg_index);
} else {
UNREACHABLE();
@@ -846,7 +846,7 @@
bool is_tagged = HasTaggedValue(cur_input->virtual_register());
AllocateFixed(cur_input, gap_index + 1, is_tagged);
AddConstraintsGapMove(gap_index, input_copy, cur_input);
- } else if (cur_input->policy() == LUnallocated::WRITABLE_REGISTER) {
+ } else if (cur_input->HasWritableRegisterPolicy()) {
// The live range of writable input registers always goes until the end
// of the instruction.
ASSERT(!cur_input->IsUsedAtStart());
@@ -925,7 +925,7 @@
if (phi != NULL) {
// This is a phi resolving move.
if (!phi->block()->IsLoopHeader()) {
- hint = LiveRangeFor(phi->id())->FirstHint();
+ hint = LiveRangeFor(phi->id())->current_hint_operand();
}
} else {
if (to->IsUnallocated()) {
@@ -1813,26 +1813,23 @@
free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
}
- UsePosition* hinted_use = current->FirstPosWithHint();
- if (hinted_use != NULL) {
- LOperand* hint = hinted_use->hint();
- if (hint->IsRegister() || hint->IsDoubleRegister()) {
- int register_index = hint->index();
- TraceAlloc(
- "Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
- RegisterName(register_index),
- free_until_pos[register_index].Value(),
- current->id(),
- current->End().Value());
+ LOperand* hint = current->FirstHint();
+ if (hint != NULL && (hint->IsRegister() || hint->IsDoubleRegister())) {
+ int register_index = hint->index();
+ TraceAlloc(
+ "Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
+ RegisterName(register_index),
+ free_until_pos[register_index].Value(),
+ current->id(),
+ current->End().Value());
- // The desired register is free until the end of the current live range.
- if (free_until_pos[register_index].Value() >= current->End().Value()) {
- TraceAlloc("Assigning preferred reg %s to live range %d\n",
- RegisterName(register_index),
- current->id());
- SetLiveRangeAssignedRegister(current, register_index, mode_, zone_);
- return true;
- }
+ // The desired register is free until the end of the current live range.
+ if (free_until_pos[register_index].Value() >= current->End().Value()) {
+ TraceAlloc("Assigning preferred reg %s to live range %d\n",
+ RegisterName(register_index),
+ current->id());
+ SetLiveRangeAssignedRegister(current, register_index, mode_, zone_);
+ return true;
}
}
diff --git a/src/lithium-allocator.h b/src/lithium-allocator.h
index 0cd5ae0..552ebdd 100644
--- a/src/lithium-allocator.h
+++ b/src/lithium-allocator.h
@@ -328,10 +328,14 @@
return assigned_register_ != kInvalidAssignment;
}
bool IsSpilled() const { return spilled_; }
- UsePosition* FirstPosWithHint() const;
+ LOperand* current_hint_operand() const {
+ ASSERT(current_hint_operand_ == FirstHint());
+ return current_hint_operand_;
+ }
LOperand* FirstHint() const {
- UsePosition* pos = FirstPosWithHint();
+ UsePosition* pos = first_pos_;
+ while (pos != NULL && !pos->HasHint()) pos = pos->next();
if (pos != NULL) return pos->hint();
return NULL;
}
@@ -398,6 +402,8 @@
// This is used as a cache, it doesn't affect correctness.
mutable UseInterval* current_interval_;
UsePosition* last_processed_use_;
+ // This is used as a cache, it's invalid outside of BuildLiveRanges.
+ LOperand* current_hint_operand_;
LOperand* spill_operand_;
int spill_start_index_;
};
diff --git a/src/lithium.cc b/src/lithium.cc
index 10d7f71..539f4ee 100644
--- a/src/lithium.cc
+++ b/src/lithium.cc
@@ -58,24 +58,27 @@
case UNALLOCATED:
unalloc = LUnallocated::cast(this);
stream->Add("v%d", unalloc->virtual_register());
- switch (unalloc->policy()) {
+ if (unalloc->basic_policy() == LUnallocated::FIXED_SLOT) {
+ stream->Add("(=%dS)", unalloc->fixed_slot_index());
+ break;
+ }
+ switch (unalloc->extended_policy()) {
case LUnallocated::NONE:
break;
case LUnallocated::FIXED_REGISTER: {
+ int reg_index = unalloc->fixed_register_index();
const char* register_name =
- Register::AllocationIndexToString(unalloc->fixed_index());
+ Register::AllocationIndexToString(reg_index);
stream->Add("(=%s)", register_name);
break;
}
case LUnallocated::FIXED_DOUBLE_REGISTER: {
+ int reg_index = unalloc->fixed_register_index();
const char* double_register_name =
- DoubleRegister::AllocationIndexToString(unalloc->fixed_index());
+ DoubleRegister::AllocationIndexToString(reg_index);
stream->Add("(=%s)", double_register_name);
break;
}
- case LUnallocated::FIXED_SLOT:
- stream->Add("(=%dS)", unalloc->fixed_index());
- break;
case LUnallocated::MUST_HAVE_REGISTER:
stream->Add("(R)");
break;
diff --git a/src/lithium.h b/src/lithium.h
index 2418274..388f565 100644
--- a/src/lithium.h
+++ b/src/lithium.h
@@ -92,12 +92,16 @@
class LUnallocated: public LOperand {
public:
- enum Policy {
+ enum BasicPolicy {
+ FIXED_SLOT,
+ EXTENDED_POLICY
+ };
+
+ enum ExtendedPolicy {
NONE,
ANY,
FIXED_REGISTER,
FIXED_DOUBLE_REGISTER,
- FIXED_SLOT,
MUST_HAVE_REGISTER,
WRITABLE_REGISTER,
SAME_AS_FIRST_INPUT
@@ -117,76 +121,32 @@
USED_AT_END
};
- explicit LUnallocated(Policy policy) : LOperand(UNALLOCATED, 0) {
- Initialize(policy, 0, USED_AT_END);
+ explicit LUnallocated(ExtendedPolicy policy) : LOperand(UNALLOCATED, 0) {
+ value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
+ value_ |= ExtendedPolicyField::encode(policy);
+ value_ |= LifetimeField::encode(USED_AT_END);
}
- LUnallocated(Policy policy, int fixed_index) : LOperand(UNALLOCATED, 0) {
- Initialize(policy, fixed_index, USED_AT_END);
+ LUnallocated(BasicPolicy policy, int index) : LOperand(UNALLOCATED, 0) {
+ ASSERT(policy == FIXED_SLOT);
+ value_ |= BasicPolicyField::encode(policy);
+ value_ |= index << FixedSlotIndexField::kShift;
+ ASSERT(this->fixed_slot_index() == index);
}
- LUnallocated(Policy policy, Lifetime lifetime) : LOperand(UNALLOCATED, 0) {
- Initialize(policy, 0, lifetime);
+ LUnallocated(ExtendedPolicy policy, int index) : LOperand(UNALLOCATED, 0) {
+ ASSERT(policy == FIXED_REGISTER || policy == FIXED_DOUBLE_REGISTER);
+ value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
+ value_ |= ExtendedPolicyField::encode(policy);
+ value_ |= LifetimeField::encode(USED_AT_END);
+ value_ |= FixedRegisterField::encode(index);
}
- // The superclass has a KindField. Some policies have a signed fixed
- // index in the upper bits.
- static const int kPolicyWidth = 3;
- static const int kLifetimeWidth = 1;
- static const int kVirtualRegisterWidth = 15;
-
- static const int kPolicyShift = kKindFieldWidth;
- static const int kLifetimeShift = kPolicyShift + kPolicyWidth;
- static const int kVirtualRegisterShift = kLifetimeShift + kLifetimeWidth;
- static const int kFixedIndexShift =
- kVirtualRegisterShift + kVirtualRegisterWidth;
- static const int kFixedIndexWidth = 32 - kFixedIndexShift;
- STATIC_ASSERT(kFixedIndexWidth > 5);
-
- class PolicyField : public BitField<Policy, kPolicyShift, kPolicyWidth> { };
-
- class LifetimeField
- : public BitField<Lifetime, kLifetimeShift, kLifetimeWidth> {
- };
-
- class VirtualRegisterField
- : public BitField<unsigned,
- kVirtualRegisterShift,
- kVirtualRegisterWidth> {
- };
-
- static const int kMaxVirtualRegisters = 1 << kVirtualRegisterWidth;
- static const int kMaxFixedIndex = (1 << (kFixedIndexWidth - 1)) - 1;
- static const int kMinFixedIndex = -(1 << (kFixedIndexWidth - 1));
-
- bool HasAnyPolicy() const {
- return policy() == ANY;
- }
- bool HasFixedPolicy() const {
- return policy() == FIXED_REGISTER ||
- policy() == FIXED_DOUBLE_REGISTER ||
- policy() == FIXED_SLOT;
- }
- bool HasRegisterPolicy() const {
- return policy() == WRITABLE_REGISTER || policy() == MUST_HAVE_REGISTER;
- }
- bool HasSameAsInputPolicy() const {
- return policy() == SAME_AS_FIRST_INPUT;
- }
- Policy policy() const { return PolicyField::decode(value_); }
- void set_policy(Policy policy) {
- value_ = PolicyField::update(value_, policy);
- }
- int fixed_index() const {
- return static_cast<int>(value_) >> kFixedIndexShift;
- }
-
- int virtual_register() const {
- return VirtualRegisterField::decode(value_);
- }
-
- void set_virtual_register(unsigned id) {
- value_ = VirtualRegisterField::update(value_, id);
+ LUnallocated(ExtendedPolicy policy, Lifetime lifetime)
+ : LOperand(UNALLOCATED, 0) {
+ value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
+ value_ |= ExtendedPolicyField::encode(policy);
+ value_ |= LifetimeField::encode(lifetime);
}
LUnallocated* CopyUnconstrained(Zone* zone) {
@@ -200,16 +160,113 @@
return reinterpret_cast<LUnallocated*>(op);
}
- bool IsUsedAtStart() {
- return LifetimeField::decode(value_) == USED_AT_START;
+ // The encoding used for LUnallocated operands depends on the policy that is
+ // stored within the operand. The FIXED_SLOT policy uses a compact encoding
+ // because it accommodates a larger pay-load.
+ //
+ // For FIXED_SLOT policy:
+ // +------------------------------------------+
+ // | slot_index | vreg | 0 | 001 |
+ // +------------------------------------------+
+ //
+ // For all other (extended) policies:
+ // +------------------------------------------+
+ // | reg_index | L | PPP | vreg | 1 | 001 | L ... Lifetime
+ // +------------------------------------------+ P ... Policy
+ //
+ // The slot index is a signed value which requires us to decode it manually
+ // instead of using the BitField utility class.
+
+ // The superclass has a KindField.
+ STATIC_ASSERT(kKindFieldWidth == 3);
+
+ // BitFields for all unallocated operands.
+ class BasicPolicyField : public BitField<BasicPolicy, 3, 1> {};
+ class VirtualRegisterField : public BitField<unsigned, 4, 18> {};
+
+ // BitFields specific to BasicPolicy::FIXED_SLOT.
+ class FixedSlotIndexField : public BitField<int, 22, 10> {};
+
+ // BitFields specific to BasicPolicy::EXTENDED_POLICY.
+ class ExtendedPolicyField : public BitField<ExtendedPolicy, 22, 3> {};
+ class LifetimeField : public BitField<Lifetime, 25, 1> {};
+ class FixedRegisterField : public BitField<int, 26, 6> {};
+
+ static const int kMaxVirtualRegisters = VirtualRegisterField::kMax + 1;
+ static const int kFixedSlotIndexWidth = FixedSlotIndexField::kSize;
+ static const int kMaxFixedSlotIndex = (1 << (kFixedSlotIndexWidth - 1)) - 1;
+ static const int kMinFixedSlotIndex = -(1 << (kFixedSlotIndexWidth - 1));
+
+ // Predicates for the operand policy.
+ bool HasAnyPolicy() const {
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == ANY;
+ }
+ bool HasFixedPolicy() const {
+ return basic_policy() == FIXED_SLOT ||
+ extended_policy() == FIXED_REGISTER ||
+ extended_policy() == FIXED_DOUBLE_REGISTER;
+ }
+ bool HasRegisterPolicy() const {
+ return basic_policy() == EXTENDED_POLICY && (
+ extended_policy() == WRITABLE_REGISTER ||
+ extended_policy() == MUST_HAVE_REGISTER);
+ }
+ bool HasSameAsInputPolicy() const {
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == SAME_AS_FIRST_INPUT;
+ }
+ bool HasFixedSlotPolicy() const {
+ return basic_policy() == FIXED_SLOT;
+ }
+ bool HasFixedRegisterPolicy() const {
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == FIXED_REGISTER;
+ }
+ bool HasFixedDoubleRegisterPolicy() const {
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == FIXED_DOUBLE_REGISTER;
+ }
+ bool HasWritableRegisterPolicy() const {
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == WRITABLE_REGISTER;
}
- private:
- void Initialize(Policy policy, int fixed_index, Lifetime lifetime) {
- value_ |= PolicyField::encode(policy);
- value_ |= LifetimeField::encode(lifetime);
- value_ |= fixed_index << kFixedIndexShift;
- ASSERT(this->fixed_index() == fixed_index);
+ // [basic_policy]: Distinguish between FIXED_SLOT and all other policies.
+ BasicPolicy basic_policy() const {
+ return BasicPolicyField::decode(value_);
+ }
+
+ // [extended_policy]: Only for non-FIXED_SLOT. The finer-grained policy.
+ ExtendedPolicy extended_policy() const {
+ ASSERT(basic_policy() == EXTENDED_POLICY);
+ return ExtendedPolicyField::decode(value_);
+ }
+
+ // [fixed_slot_index]: Only for FIXED_SLOT.
+ int fixed_slot_index() const {
+ ASSERT(HasFixedSlotPolicy());
+ return static_cast<int>(value_) >> FixedSlotIndexField::kShift;
+ }
+
+ // [fixed_register_index]: Only for FIXED_REGISTER or FIXED_DOUBLE_REGISTER.
+ int fixed_register_index() const {
+ ASSERT(HasFixedRegisterPolicy() || HasFixedDoubleRegisterPolicy());
+ return FixedRegisterField::decode(value_);
+ }
+
+ // [virtual_register]: The virtual register ID for this operand.
+ int virtual_register() const {
+ return VirtualRegisterField::decode(value_);
+ }
+ void set_virtual_register(unsigned id) {
+ value_ = VirtualRegisterField::update(value_, id);
+ }
+
+ // [lifetime]: Only for non-FIXED_SLOT.
+ bool IsUsedAtStart() {
+ ASSERT(basic_policy() == EXTENDED_POLICY);
+ return LifetimeField::decode(value_) == USED_AT_START;
}
};
diff --git a/src/messages.js b/src/messages.js
index c4de849..b9bce1e 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -101,12 +101,18 @@
observe_type_non_string: ["Invalid changeRecord with non-string 'type' property"],
observe_notify_non_notifier: ["notify called on non-notifier object"],
proto_poison_pill: ["Generic use of __proto__ accessor not allowed"],
+ parameterless_typed_array_constr:
+ ["%0"," constructor should have at least one argument."],
+ not_typed_array: ["this is not a typed array."],
+ invalid_argument: ["invalid_argument"],
// RangeError
invalid_array_length: ["Invalid array length"],
invalid_array_buffer_length: ["Invalid array buffer length"],
invalid_typed_array_offset: ["Start offset is too large"],
invalid_typed_array_length: ["Length is too large"],
invalid_typed_array_alignment: ["%0", "of", "%1", "should be a multiple of", "%3"],
+ typed_array_set_source_too_large:
+ ["Source is too large"],
stack_overflow: ["Maximum call stack size exceeded"],
invalid_time_value: ["Invalid time value"],
// SyntaxError
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 6257207..f1c2553 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -75,6 +75,28 @@
}
+void LoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a0 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a1 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -117,9 +139,10 @@
int constant_stack_parameter_count) {
// register state
// a0 -- number of arguments
+ // a1 -- function
// a2 -- type info cell with elements kind
- static Register registers[] = { a2 };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { a1, a2 };
+ descriptor->register_param_count_ = 2;
if (constant_stack_parameter_count != 0) {
// stack param count needs (constructor pointer, and single argument)
descriptor->stack_parameter_count_ = &a0;
@@ -4100,12 +4123,6 @@
Register InstanceofStub::right() { return a1; }
-void LoadFieldStub::Generate(MacroAssembler* masm) {
- StubCompiler::DoGenerateFastPropertyLoad(masm, v0, reg_, inobject_, index_);
- __ Ret();
-}
-
-
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
@@ -5104,6 +5121,7 @@
Handle<Object> terminal_kind_sentinel =
TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
LAST_FAST_ELEMENTS_KIND);
+ __ JumpIfNotSmi(a3, &miss);
__ Branch(&miss, gt, a3, Operand(terminal_kind_sentinel));
// Make sure the function is the Array() function
__ LoadArrayFunction(a3);
@@ -6301,8 +6319,36 @@
__ AllocateAsciiConsString(v0, t2, t0, t1, &call_runtime);
__ bind(&allocated);
// Fill the fields of the cons string.
+ Label skip_write_barrier, after_writing;
+ ExternalReference high_promotion_mode = ExternalReference::
+ new_space_high_promotion_mode_active_address(masm->isolate());
+ __ li(t0, Operand(high_promotion_mode));
+ __ lw(t0, MemOperand(t0, 0));
+ __ Branch(&skip_write_barrier, eq, t0, Operand(zero_reg));
+
+ __ mov(t3, v0);
+ __ sw(a0, FieldMemOperand(t3, ConsString::kFirstOffset));
+ __ RecordWriteField(t3,
+ ConsString::kFirstOffset,
+ a0,
+ t0,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
+ __ sw(a1, FieldMemOperand(t3, ConsString::kSecondOffset));
+ __ RecordWriteField(t3,
+ ConsString::kSecondOffset,
+ a1,
+ t0,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
+ __ jmp(&after_writing);
+
+ __ bind(&skip_write_barrier);
__ sw(a0, FieldMemOperand(v0, ConsString::kFirstOffset));
__ sw(a1, FieldMemOperand(v0, ConsString::kSecondOffset));
+
+ __ bind(&after_writing);
+
__ IncrementCounter(counters->string_add_native(), 1, a2, a3);
__ DropAndRet(2);
@@ -7182,6 +7228,9 @@
{ REG(t1), REG(a0), REG(t2), EMIT_REMEMBERED_SET },
// FastNewClosureStub::Generate
{ REG(a2), REG(t0), REG(a1), EMIT_REMEMBERED_SET },
+ // StringAddStub::Generate
+ { REG(t3), REG(a1), REG(t0), EMIT_REMEMBERED_SET },
+ { REG(t3), REG(a0), REG(t0), EMIT_REMEMBERED_SET },
// Null termination.
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
@@ -7704,13 +7753,8 @@
Label no_info, switch_ready;
// Get the elements kind and case on that.
__ Branch(&no_info, eq, a2, Operand(undefined_sentinel));
- __ lw(a3, FieldMemOperand(a2, kPointerSize));
-
- // There is no info if the call site went megamorphic either
- // TODO(mvstanton): Really? I thought if it was the array function that
- // the cell wouldn't get stamped as megamorphic.
- __ Branch(&no_info, eq, a3,
- Operand(TypeFeedbackCells::MegamorphicSentinel(masm->isolate())));
+ __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+ __ JumpIfNotSmi(a3, &no_info);
__ SmiUntag(a3);
__ jmp(&switch_ready);
__ bind(&no_info);
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index a6fd39a..872af86 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -1592,7 +1592,8 @@
: ObjectLiteral::kNoFlags;
__ li(a0, Operand(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
- if (expr->depth() > 1) {
+ if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
+ expr->depth() > 1) {
__ Push(a3, a2, a1, a0);
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
@@ -1943,11 +1944,12 @@
Label resume;
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ Branch(&resume, ne, result_register(), Operand(at));
- __ pop(result_register());
if (expr->yield_kind() == Yield::SUSPEND) {
- // TODO(wingo): Box into { value: VALUE, done: false }.
+ EmitReturnIteratorResult(false);
+ } else {
+ __ pop(result_register());
+ EmitReturnSequence();
}
- EmitReturnSequence();
__ bind(&resume);
context()->Plug(result_register());
@@ -1959,18 +1961,7 @@
__ li(a1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
__ sw(a1, FieldMemOperand(result_register(),
JSGeneratorObject::kContinuationOffset));
- __ pop(result_register());
- // TODO(wingo): Box into { value: VALUE, done: true }.
-
- // Exit all nested statements.
- NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
- int context_length = 0;
- while (current != NULL) {
- current = current->Exit(&stack_depth, &context_length);
- }
- __ Drop(stack_depth);
- EmitReturnSequence();
+ EmitReturnIteratorResult(true);
break;
}
@@ -2057,7 +2048,7 @@
__ Subu(a3, a3, Operand(1));
__ Branch(&call_resume, lt, a3, Operand(zero_reg));
__ push(a2);
- __ b(&push_operand_holes);
+ __ Branch(&push_operand_holes);
__ bind(&call_resume);
__ push(a1);
__ push(result_register());
@@ -2076,6 +2067,56 @@
}
+void FullCodeGenerator::EmitReturnIteratorResult(bool done) {
+ Label gc_required;
+ Label allocated;
+
+ Handle<Map> map(isolate()->native_context()->generator_result_map());
+
+ __ Allocate(map->instance_size(), a0, a2, a3, &gc_required, TAG_OBJECT);
+
+ __ bind(&allocated);
+ __ li(a1, Operand(map));
+ __ pop(a2);
+ __ li(a3, Operand(isolate()->factory()->ToBoolean(done)));
+ __ li(t0, Operand(isolate()->factory()->empty_fixed_array()));
+ ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
+ __ sw(a1, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ sw(t0, FieldMemOperand(a0, JSObject::kPropertiesOffset));
+ __ sw(t0, FieldMemOperand(a0, JSObject::kElementsOffset));
+ __ sw(a2,
+ FieldMemOperand(a0, JSGeneratorObject::kResultValuePropertyOffset));
+ __ sw(a3,
+ FieldMemOperand(a0, JSGeneratorObject::kResultDonePropertyOffset));
+
+ // Only the value field needs a write barrier, as the other values are in the
+ // root set.
+ __ RecordWriteField(a0, JSGeneratorObject::kResultValuePropertyOffset,
+ a2, a3, kRAHasBeenSaved, kDontSaveFPRegs);
+
+ if (done) {
+ // Exit all nested statements.
+ NestedStatement* current = nesting_stack_;
+ int stack_depth = 0;
+ int context_length = 0;
+ while (current != NULL) {
+ current = current->Exit(&stack_depth, &context_length);
+ }
+ __ Drop(stack_depth);
+ }
+
+ __ mov(result_register(), a0);
+ EmitReturnSequence();
+
+ __ bind(&gc_required);
+ __ Push(Smi::FromInt(map->instance_size()));
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ lw(context_register(),
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ jmp(&allocated);
+}
+
+
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index e434fdb..02055a4 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -1440,6 +1440,25 @@
}
+void StoreIC::GenerateSlow(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a2 : key
+ // -- a1 : receiver
+ // -- ra : return address
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ __ Push(a1, a2, a0);
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- a0 : value
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 301ceba..77e4216 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -2688,31 +2688,20 @@
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+ int offset = instr->hydrogen()->offset();
Register object = ToRegister(instr->object());
- if (!FLAG_track_double_fields) {
- ASSERT(!instr->hydrogen()->representation().IsDouble());
- }
- Register temp = instr->hydrogen()->representation().IsDouble()
- ? scratch0() : ToRegister(instr->result());
- if (instr->hydrogen()->is_in_object()) {
- __ lw(temp, FieldMemOperand(object, instr->hydrogen()->offset()));
- } else {
- __ lw(temp, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ lw(temp, FieldMemOperand(temp, instr->hydrogen()->offset()));
+ if (instr->hydrogen()->representation().IsDouble()) {
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ ldc1(result, FieldMemOperand(object, offset));
+ return;
}
- if (instr->hydrogen()->representation().IsDouble()) {
- Label load_from_heap_number, done;
- DoubleRegister result = ToDoubleRegister(instr->result());
- FPURegister flt_scratch = double_scratch0().low();
- __ JumpIfNotSmi(temp, &load_from_heap_number);
- __ SmiUntag(temp);
- __ mtc1(temp, flt_scratch);
- __ cvt_d_w(result, flt_scratch);
- __ Branch(&done);
- __ bind(&load_from_heap_number);
- __ ldc1(result, FieldMemOperand(temp, HeapNumber::kValueOffset));
- __ bind(&done);
+ Register result = ToRegister(instr->result());
+ if (instr->hydrogen()->is_in_object()) {
+ __ lw(result, FieldMemOperand(object, offset));
+ } else {
+ __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ __ lw(result, FieldMemOperand(result, offset));
}
}
@@ -2857,38 +2846,6 @@
}
-void LCodeGen::DoLoadElements(LLoadElements* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->object());
- Register scratch = scratch0();
-
- __ lw(result, FieldMemOperand(input, JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- Label done, fail;
- __ lw(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
- __ Branch(USE_DELAY_SLOT, &done, eq, scratch, Operand(at));
- __ LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex); // In the delay slot.
- __ Branch(&done, eq, scratch, Operand(at));
- // |scratch| still contains |input|'s map.
- __ lbu(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
- __ Ext(scratch, scratch, Map::kElementsKindShift,
- Map::kElementsKindBitCount);
- __ Branch(&fail, lt, scratch,
- Operand(GetInitialFastElementsKind()));
- __ Branch(&done, le, scratch,
- Operand(TERMINAL_FAST_ELEMENTS_KIND));
- __ Branch(&fail, lt, scratch,
- Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ Branch(&done, le, scratch,
- Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ bind(&fail);
- __ Abort("Check for fast or external elements failed.");
- __ bind(&done);
- }
-}
-
-
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register to_reg = ToRegister(instr->result());
@@ -3913,8 +3870,7 @@
__ li(a0, Operand(instr->arity()));
__ li(a2, Operand(instr->hydrogen()->property_cell()));
- Object* cell_value = instr->hydrogen()->property_cell()->value();
- ElementsKind kind = static_cast<ElementsKind>(Smi::cast(cell_value)->value());
+ ElementsKind kind = instr->hydrogen()->elements_kind();
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(kind);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
@@ -3944,29 +3900,26 @@
Representation representation = instr->representation();
Register object = ToRegister(instr->object());
- Register value = ToRegister(instr->value());
- ASSERT(!object.is(value));
Register scratch = scratch0();
int offset = instr->offset();
+ Handle<Map> transition = instr->transition();
+
if (FLAG_track_fields && representation.IsSmi()) {
+ Register value = ToRegister(instr->value());
__ SmiTagCheckOverflow(value, value, scratch);
if (!instr->hydrogen()->value()->range()->IsInSmiRange()) {
DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
}
- } else if (FLAG_track_double_fields && representation.IsDouble() &&
- !instr->hydrogen()->value()->type().IsSmi() &&
- !instr->hydrogen()->value()->type().IsHeapNumber()) {
- Label do_store;
- __ JumpIfSmi(value, &do_store);
- Handle<Map> map(isolate()->factory()->heap_number_map());
-
- __ lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- DoCheckMapCommon(scratch, map, REQUIRE_EXACT_MAP, instr->environment());
- __ bind(&do_store);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ ASSERT(transition.is_null());
+ ASSERT(instr->is_in_object());
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ __ sdc1(value, FieldMemOperand(object, offset));
+ return;
}
- Handle<Map> transition = instr->transition();
if (!transition.is_null()) {
if (transition->CanBeDeprecated()) {
transition_maps_.Add(transition, info()->zone());
@@ -3988,6 +3941,8 @@
}
// Do the store.
+ Register value = ToRegister(instr->value());
+ ASSERT(!object.is(value));
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
@@ -4855,6 +4810,8 @@
} else {
mode = NUMBER_CANDIDATE_IS_SMI;
}
+ } else {
+ mode = NUMBER_CANDIDATE_IS_SMI;
}
}
@@ -5297,7 +5254,8 @@
// Pick the right runtime function or stub to call.
int properties_count = instr->hydrogen()->constant_properties_length() / 2;
- if (instr->hydrogen()->depth() > 1) {
+ if ((FLAG_track_double_fields && instr->hydrogen()->may_store_doubles()) ||
+ instr->hydrogen()->depth() > 1) {
__ Push(a3, a2, a1, a0);
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
} else if (flags != ObjectLiteral::kFastElements ||
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index d346a29..28309e2 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -677,7 +677,7 @@
int vreg = allocator_->GetVirtualRegister();
if (!allocator_->AllocationOk()) {
Abort("Out of virtual registers while trying to allocate temp register.");
- return NULL;
+ vreg = 0;
}
operand->set_virtual_register(vreg);
return operand;
@@ -1306,8 +1306,8 @@
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineAsRegister(new(zone()) LBitI(left, right));
} else {
ASSERT(instr->representation().IsTagged());
@@ -1402,15 +1402,15 @@
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left;
- LOperand* right = UseOrConstant(instr->MostConstantOperand());
+ LOperand* right = UseOrConstant(instr->BetterRightOperand());
LOperand* temp = NULL;
if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
(instr->CheckFlag(HValue::kCanOverflow) ||
!right->IsConstantOperand())) {
- left = UseRegister(instr->LeastConstantOperand());
+ left = UseRegister(instr->BetterLeftOperand());
temp = TempRegister();
} else {
- left = UseRegisterAtStart(instr->LeastConstantOperand());
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
}
LMulI* mul = new(zone()) LMulI(left, right, temp);
if (instr->CheckFlag(HValue::kCanOverflow) ||
@@ -1475,8 +1475,8 @@
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
LAddI* add = new(zone()) LAddI(left, right);
LInstruction* result = DefineAsRegister(add);
if (instr->CheckFlag(HValue::kCanOverflow)) {
@@ -1507,8 +1507,8 @@
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- left = UseRegisterAtStart(instr->LeastConstantOperand());
- right = UseOrConstantAtStart(instr->MostConstantOperand());
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
+ right = UseOrConstantAtStart(instr->BetterRightOperand());
} else {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
@@ -2029,12 +2029,6 @@
}
-LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadElements(input));
-}
-
-
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
@@ -2199,17 +2193,22 @@
: UseRegisterAtStart(instr->object());
}
- LOperand* val =
- needs_write_barrier ||
- (FLAG_track_fields && instr->field_representation().IsSmi())
- ? UseTempRegister(instr->value()) : UseRegister(instr->value());
+ LOperand* val;
+ if (needs_write_barrier ||
+ (FLAG_track_fields && instr->field_representation().IsSmi())) {
+ val = UseTempRegister(instr->value());
+ } else if (FLAG_track_double_fields &&
+ instr->field_representation().IsDouble()) {
+ val = UseRegisterAtStart(instr->value());
+ } else {
+ val = UseRegister(instr->value());
+ }
// We need a temporary register for write barrier of the map field.
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
- if ((FLAG_track_fields && instr->field_representation().IsSmi()) ||
- (FLAG_track_double_fields && instr->field_representation().IsDouble())) {
+ if (FLAG_track_fields && instr->field_representation().IsSmi()) {
return AssignEnvironment(result);
}
return result;
@@ -2328,7 +2327,7 @@
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedIndex) {
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
Abort("Too many spill slots needed for OSR");
spill_index = 0;
}
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index 8b46e6f..bb8c993 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -122,7 +122,6 @@
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
- V(LoadElements) \
V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
@@ -1521,18 +1520,6 @@
};
-class LLoadElements: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadElements(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
-};
-
-
class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadExternalArrayPointer(LOperand* object) {
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 301e92f..81e9ec9 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -3135,8 +3135,34 @@
Register scratch1,
Register scratch2,
Label* gc_required) {
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ Label allocate_new_space, install_map;
+ AllocationFlags flags = TAG_OBJECT;
+
+ ExternalReference high_promotion_mode = ExternalReference::
+ new_space_high_promotion_mode_active_address(isolate());
+ li(scratch1, Operand(high_promotion_mode));
+ lw(scratch1, MemOperand(scratch1, 0));
+ Branch(&allocate_new_space, eq, scratch1, Operand(zero_reg));
+
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
+
+ jmp(&install_map);
+
+ bind(&allocate_new_space);
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ flags);
+
+ bind(&install_map);
+
InitializeNewString(result,
length,
Heap::kConsAsciiStringMapRootIndex,
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index aeb26ee..80ab31a 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -307,11 +307,13 @@
}
-void StubCompiler::DoGenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- bool inobject,
- int index) {
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+ Register dst,
+ Register src,
+ bool inobject,
+ int index,
+ Representation representation) {
+ ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
@@ -442,8 +444,10 @@
Register value_reg,
Register scratch1,
Register scratch2,
+ Register scratch3,
Label* miss_label,
- Label* miss_restore_name) {
+ Label* miss_restore_name,
+ Label* slow) {
// a0 : value.
Label exit;
@@ -465,16 +469,6 @@
// Ensure no transitions to deprecated maps are followed.
__ CheckMapDeprecated(transition, scratch1, miss_label);
- if (FLAG_track_fields && representation.IsSmi()) {
- __ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
- Label do_store;
- __ JumpIfSmi(value_reg, &do_store);
- __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
- miss_label, DONT_DO_SMI_CHECK);
- __ bind(&do_store);
- }
-
// Check that we are allowed to write this.
if (object->GetPrototype()->IsJSObject()) {
JSObject* holder;
@@ -490,7 +484,7 @@
}
Register holder_reg = CheckPrototypes(
object, receiver_reg, Handle<JSObject>(holder), name_reg,
- scratch1, scratch2, name, miss_restore_name);
+ scratch1, scratch2, name, miss_restore_name, SKIP_RECEIVER);
// If no property was found, and the holder (the last object in the
// prototype chain) is in slow mode, we need to do a negative lookup on the
// holder.
@@ -509,6 +503,30 @@
}
}
+ Register storage_reg = name_reg;
+
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_restore_name);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ Label do_store, heap_number;
+ __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow);
+
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiUntag(scratch1, value_reg);
+ __ mtc1(scratch1, f6);
+ __ cvt_d_w(f4, f6);
+ __ jmp(&do_store);
+
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
+ miss_restore_name, DONT_DO_SMI_CHECK);
+ __ ldc1(f4, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ bind(&do_store);
+ __ sdc1(f4, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
+ }
+
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
@@ -536,7 +554,7 @@
__ RecordWriteField(receiver_reg,
HeapObject::kMapOffset,
scratch1,
- name_reg,
+ scratch2,
kRAHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
@@ -554,7 +572,11 @@
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- __ sw(value_reg, FieldMemOperand(receiver_reg, offset));
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ __ sw(storage_reg, FieldMemOperand(receiver_reg, offset));
+ } else {
+ __ sw(value_reg, FieldMemOperand(receiver_reg, offset));
+ }
if (!FLAG_track_fields || !representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
@@ -562,7 +584,11 @@
// Update the write barrier for the array address.
// Pass the now unused name_reg as a scratch register.
- __ mov(name_reg, value_reg);
+ if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ __ mov(name_reg, value_reg);
+ } else {
+ ASSERT(storage_reg.is(name_reg));
+ }
__ RecordWriteField(receiver_reg,
offset,
name_reg,
@@ -576,7 +602,11 @@
// Get the properties array
__ lw(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ sw(value_reg, FieldMemOperand(scratch1, offset));
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ __ sw(storage_reg, FieldMemOperand(scratch1, offset));
+ } else {
+ __ sw(value_reg, FieldMemOperand(scratch1, offset));
+ }
if (!FLAG_track_fields || !representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
@@ -584,6 +614,11 @@
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
+ if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ __ mov(name_reg, value_reg);
+ } else {
+ ASSERT(storage_reg.is(name_reg));
+ }
__ mov(name_reg, value_reg);
__ RecordWriteField(scratch1,
offset,
@@ -643,11 +678,37 @@
if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
- Label do_store;
- __ JumpIfSmi(value_reg, &do_store);
- __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
+ // Load the double storage.
+ if (index < 0) {
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ lw(scratch1, FieldMemOperand(receiver_reg, offset));
+ } else {
+ __ lw(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ __ lw(scratch1, FieldMemOperand(scratch1, offset));
+ }
+
+ // Store the value into the storage.
+ Label do_store, heap_number;
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiUntag(scratch2, value_reg);
+ __ mtc1(scratch2, f6);
+ __ cvt_d_w(f4, f6);
+ __ jmp(&do_store);
+
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex,
miss_label, DONT_DO_SMI_CHECK);
+ __ ldc1(f4, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
__ bind(&do_store);
+ __ sdc1(f4, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
+ // Return the value (register v0).
+ ASSERT(value_reg.is(a0));
+ __ mov(v0, a0);
+ __ Ret();
+ return;
}
// TODO(verwaest): Share this code as a code stub.
@@ -1308,9 +1369,20 @@
void BaseLoadStubCompiler::GenerateLoadField(Register reg,
Handle<JSObject> holder,
- PropertyIndex index) {
- GenerateFastPropertyLoad(masm(), v0, reg, holder, index);
- __ Ret();
+ PropertyIndex field,
+ Representation representation) {
+ if (!reg.is(receiver())) __ mov(receiver(), reg);
+ if (kind() == Code::LOAD_IC) {
+ LoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ } else {
+ KeyedLoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ }
}
@@ -1540,7 +1612,8 @@
// Do the right check and compute the holder register.
Register reg = CheckPrototypes(object, a0, holder, a1, a3, t0, name, &miss);
- GenerateFastPropertyLoad(masm(), a1, reg, holder, index);
+ GenerateFastPropertyLoad(masm(), a1, reg, index.is_inobject(holder),
+ index.translate(holder), Representation::Tagged());
GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
diff --git a/src/mksnapshot.cc b/src/mksnapshot.cc
index 4e34d18..7592a89 100644
--- a/src/mksnapshot.cc
+++ b/src/mksnapshot.cc
@@ -32,6 +32,10 @@
#endif
#include <signal.h>
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+
#include "v8.h"
#include "bootstrapper.h"
@@ -324,13 +328,18 @@
}
#endif
i::Serializer::Enable();
- Persistent<Context> context = v8::Context::New();
+ Isolate* isolate = Isolate::GetCurrent();
+ Persistent<Context> context;
+ {
+ HandleScope handle_scope(isolate);
+ context.Reset(isolate, Context::New(isolate));
+ }
+
if (context.IsEmpty()) {
fprintf(stderr,
"\nException thrown while compiling natives - see above.\n\n");
exit(1);
}
- Isolate* isolate = context->GetIsolate();
if (i::FLAG_extra_code != NULL) {
context->Enter();
// Capture 100 frames if anything happens.
diff --git a/src/object-observe.js b/src/object-observe.js
index bfb4a65..77409b9 100644
--- a/src/object-observe.js
+++ b/src/object-observe.js
@@ -138,7 +138,9 @@
var changeRecord = (arguments.length < 4) ?
{ type: type, object: object, name: name } :
{ type: type, object: object, name: name, oldValue: oldValue };
- ObjectFreeze(changeRecord);
+ // TODO(rafaelw): This breaks spec-compliance. Re-enable when freezing isn't
+ // slow.
+ // ObjectFreeze(changeRecord);
EnqueueChangeRecord(changeRecord, objectInfo.changeObservers);
}
@@ -164,7 +166,9 @@
%DefineOrRedefineDataProperty(newRecord, prop, changeRecord[prop],
READ_ONLY + DONT_DELETE);
}
- ObjectFreeze(newRecord);
+ // TODO(rafaelw): This breaks spec-compliance. Re-enable when freezing isn't
+ // slow.
+ // ObjectFreeze(newRecord);
EnqueueChangeRecord(newRecord, objectInfo.changeObservers);
}
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index ee6df1d..ecbf9d6 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -323,10 +323,6 @@
instance_size() < HEAP->Capacity()));
VerifyHeapPointer(prototype());
VerifyHeapPointer(instance_descriptors());
- DescriptorArray* descriptors = instance_descriptors();
- for (int i = 0; i < NumberOfOwnDescriptors(); ++i) {
- CHECK_EQ(i, descriptors->GetDetails(i).descriptor_index() - 1);
- }
SLOW_ASSERT(instance_descriptors()->IsSortedNoDuplicates());
if (HasTransitionArray()) {
SLOW_ASSERT(transitions()->IsSortedNoDuplicates());
@@ -419,6 +415,7 @@
VerifyObjectField(kReceiverOffset);
VerifyObjectField(kOperandStackOffset);
VerifyObjectField(kContinuationOffset);
+ VerifyObjectField(kStackHandlerIndexOffset);
}
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 29474b9..06a13df 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -58,10 +58,7 @@
Smi* PropertyDetails::AsSmi() {
- // Ensure the upper 2 bits have the same value by sign extending it. This is
- // necessary to be able to use the 31st bit of the property details.
- int value = value_ << 1;
- return Smi::FromInt(value >> 1);
+ return Smi::FromInt(value_);
}
@@ -286,6 +283,16 @@
return IsFixedArray() || IsFixedDoubleArray() || IsExternalArray();
}
+
+MaybeObject* Object::AllocateNewStorageFor(Heap* heap,
+ Representation representation,
+ PretenureFlag tenure) {
+ if (!FLAG_track_double_fields) return this;
+ if (!representation.IsDouble()) return this;
+ return heap->AllocateHeapNumber(Number(), tenure);
+}
+
+
StringShape::StringShape(String* str)
: type_(str->map()->instance_type()) {
set_valid();
@@ -1512,7 +1519,7 @@
}
-MaybeObject* JSObject::TransitionToMap(Map* map) {
+MaybeObject* JSObject::AllocateStorageForMap(Map* map) {
ASSERT(this->map()->inobject_properties() == map->inobject_properties());
ElementsKind expected_kind = this->map()->elements_kind();
if (map->elements_kind() != expected_kind) {
@@ -1702,10 +1709,17 @@
}
+MaybeObject* JSObject::FastPropertyAt(Representation representation,
+ int index) {
+ Object* raw_value = RawFastPropertyAt(index);
+ return raw_value->AllocateNewStorageFor(GetHeap(), representation);
+}
+
+
// Access fast-case object properties at index. The use of these routines
// is needed to correctly distinguish between properties stored in-object and
// properties stored in the properties array.
-Object* JSObject::FastPropertyAt(int index) {
+Object* JSObject::RawFastPropertyAt(int index) {
// Adjust for the number of properties stored in the object.
index -= map()->inobject_properties();
if (index < 0) {
@@ -1718,7 +1732,7 @@
}
-Object* JSObject::FastPropertyAtPut(int index, Object* value) {
+void JSObject::FastPropertyAtPut(int index, Object* value) {
// Adjust for the number of properties stored in the object.
index -= map()->inobject_properties();
if (index < 0) {
@@ -1729,7 +1743,6 @@
ASSERT(index < properties()->length());
properties()->set(index, value);
}
- return value;
}
@@ -2347,9 +2360,6 @@
const WhitenessWitness&) {
// Range check.
ASSERT(descriptor_number < number_of_descriptors());
- ASSERT(desc->GetDetails().descriptor_index() <=
- number_of_descriptors());
- ASSERT(desc->GetDetails().descriptor_index() > 0);
ASSERT(!desc->GetDetails().representation().IsNone());
NoIncrementalWriteBarrierSet(this,
@@ -2367,9 +2377,6 @@
void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
// Range check.
ASSERT(descriptor_number < number_of_descriptors());
- ASSERT(desc->GetDetails().descriptor_index() <=
- number_of_descriptors());
- ASSERT(desc->GetDetails().descriptor_index() > 0);
ASSERT(!desc->GetDetails().representation().IsNone());
set(ToKeyIndex(descriptor_number), desc->GetKey());
@@ -2381,9 +2388,7 @@
void DescriptorArray::Append(Descriptor* desc,
const WhitenessWitness& witness) {
int descriptor_number = number_of_descriptors();
- int enumeration_index = descriptor_number + 1;
SetNumberOfDescriptors(descriptor_number + 1);
- desc->SetEnumerationIndex(enumeration_index);
Set(descriptor_number, desc, witness);
uint32_t hash = desc->GetKey()->Hash();
@@ -2402,9 +2407,7 @@
void DescriptorArray::Append(Descriptor* desc) {
int descriptor_number = number_of_descriptors();
- int enumeration_index = descriptor_number + 1;
SetNumberOfDescriptors(descriptor_number + 1);
- desc->SetEnumerationIndex(enumeration_index);
Set(descriptor_number, desc);
uint32_t hash = desc->GetKey()->Hash();
@@ -3610,6 +3613,12 @@
}
+Handle<Map> Map::CurrentMapForDeprecated(Handle<Map> map) {
+ if (!map->is_deprecated()) return map;
+ return GeneralizeRepresentation(map, 0, Representation::Smi());
+}
+
+
void Map::NotifyLeafMapLayoutChange() {
dependent_code()->DeoptimizeDependentCodeGroup(
GetIsolate(),
@@ -4146,23 +4155,6 @@
void Map::InitializeDescriptors(DescriptorArray* descriptors) {
int len = descriptors->number_of_descriptors();
-#ifdef DEBUG
- ASSERT(len <= DescriptorArray::kMaxNumberOfDescriptors);
-
- bool used_indices[DescriptorArray::kMaxNumberOfDescriptors];
- for (int i = 0; i < len; ++i) used_indices[i] = false;
-
- // Ensure that all enumeration indexes between 1 and length occur uniquely in
- // the descriptor array.
- for (int i = 0; i < len; ++i) {
- int enum_index = descriptors->GetDetails(i).descriptor_index() -
- PropertyDetails::kInitialIndex;
- ASSERT(0 <= enum_index && enum_index < len);
- ASSERT(!used_indices[enum_index]);
- used_indices[enum_index] = true;
- }
-#endif
-
set_instance_descriptors(descriptors);
SetNumberOfOwnDescriptors(len);
}
@@ -5132,6 +5124,7 @@
ACCESSORS(JSGeneratorObject, receiver, Object, kReceiverOffset)
SMI_ACCESSORS(JSGeneratorObject, continuation, kContinuationOffset)
ACCESSORS(JSGeneratorObject, operand_stack, FixedArray, kOperandStackOffset)
+SMI_ACCESSORS(JSGeneratorObject, stack_handler_index, kStackHandlerIndexOffset)
JSGeneratorObject* JSGeneratorObject::cast(Object* obj) {
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 5aeeec6..0849a63 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -271,7 +271,7 @@
switch (descs->GetType(i)) {
case FIELD: {
int index = descs->GetFieldIndex(i);
- FastPropertyAt(index)->ShortPrint(out);
+ RawFastPropertyAt(index)->ShortPrint(out);
PrintF(out, " (field at offset %d)\n", index);
break;
}
diff --git a/src/objects-visiting-inl.h b/src/objects-visiting-inl.h
index 6a64cbf..add247e 100644
--- a/src/objects-visiting-inl.h
+++ b/src/objects-visiting-inl.h
@@ -571,6 +571,12 @@
return false;
}
+ // If this is a native function we do not flush the code because %SetCode
+ // breaks the one-to-one relation between SharedFunctionInfo and Code.
+ if (shared_info->native()) {
+ return false;
+ }
+
if (FLAG_age_code) {
return shared_info->code()->IsOld();
} else {
diff --git a/src/objects.cc b/src/objects.cc
index 94fd487..d127d1b 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -657,8 +657,8 @@
ASSERT(enumeration_index > 0);
}
- details = PropertyDetails(details.attributes(), details.type(),
- Representation::None(), enumeration_index);
+ details = PropertyDetails(
+ details.attributes(), details.type(), enumeration_index);
if (IsGlobalObject()) {
JSGlobalPropertyCell* cell =
@@ -815,11 +815,14 @@
value = result->holder()->GetNormalizedProperty(result);
ASSERT(!value->IsTheHole() || result->IsReadOnly());
return value->IsTheHole() ? heap->undefined_value() : value;
- case FIELD:
- value = result->holder()->FastPropertyAt(
+ case FIELD: {
+ MaybeObject* maybe_result = result->holder()->FastPropertyAt(
+ result->representation(),
result->GetFieldIndex().field_index());
+ if (!maybe_result->To(&value)) return maybe_result;
ASSERT(!value->IsTheHole() || result->IsReadOnly());
return value->IsTheHole() ? heap->undefined_value() : value;
+ }
case CONSTANT_FUNCTION:
return result->GetConstantFunction();
case CALLBACKS:
@@ -1711,7 +1714,15 @@
MaybeObject* JSObject::AddFastPropertyUsingMap(Map* new_map,
Name* name,
Object* value,
- int field_index) {
+ int field_index,
+ Representation representation) {
+ // This method is used to transition to a field. If we are transitioning to a
+ // double field, allocate new storage.
+ Object* storage;
+ MaybeObject* maybe_storage =
+ value->AllocateNewStorageFor(GetHeap(), representation);
+ if (!maybe_storage->To(&storage)) return maybe_storage;
+
if (map()->unused_property_fields() == 0) {
int new_unused = new_map->unused_property_fields();
FixedArray* values;
@@ -1721,8 +1732,11 @@
set_properties(values);
}
+
set_map(new_map);
- return FastPropertyAtPut(field_index, value);
+
+ FastPropertyAtPut(field_index, storage);
+ return value;
}
@@ -1774,8 +1788,8 @@
int index = map()->NextFreePropertyIndex();
// Allocate new instance descriptors with (name, index) added
- FieldDescriptor new_field(
- name, index, attributes, value->OptimalRepresentation(), 0);
+ Representation representation = value->OptimalRepresentation();
+ FieldDescriptor new_field(name, index, attributes, representation);
ASSERT(index < map()->inobject_properties() ||
(index - map()->inobject_properties()) < properties()->length() ||
@@ -1783,6 +1797,7 @@
FixedArray* values = NULL;
+ // TODO(verwaest): Merge with AddFastPropertyUsingMap.
if (map()->unused_property_fields() == 0) {
// Make room for the new value
MaybeObject* maybe_values =
@@ -1792,10 +1807,17 @@
TransitionFlag flag = INSERT_TRANSITION;
+ Heap* heap = isolate->heap();
+
Map* new_map;
MaybeObject* maybe_new_map = map()->CopyAddDescriptor(&new_field, flag);
if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ Object* storage;
+ MaybeObject* maybe_storage =
+ value->AllocateNewStorageFor(heap, representation);
+ if (!maybe_storage->To(&storage)) return maybe_storage;
+
if (map()->unused_property_fields() == 0) {
ASSERT(values != NULL);
set_properties(values);
@@ -1805,7 +1827,9 @@
}
set_map(new_map);
- return FastPropertyAtPut(index, value);
+
+ FastPropertyAtPut(index, storage);
+ return value;
}
@@ -1814,7 +1838,7 @@
JSFunction* function,
PropertyAttributes attributes) {
// Allocate new instance descriptors with (name, function) added
- ConstantFunctionDescriptor d(name, function, attributes, 0);
+ ConstantFunctionDescriptor d(name, function, attributes);
TransitionFlag flag =
// Do not add transitions to global objects.
@@ -1850,8 +1874,7 @@
// Assign an enumeration index to the property and update
// SetNextEnumerationIndex.
int index = dict->NextEnumerationIndex();
- PropertyDetails details = PropertyDetails(
- attributes, NORMAL, Representation::None(), index);
+ PropertyDetails details = PropertyDetails(attributes, NORMAL, index);
dict->SetNextEnumerationIndex(index + 1);
dict->SetEntry(entry, name, store_value, details);
return value;
@@ -1863,8 +1886,7 @@
}
JSGlobalPropertyCell::cast(store_value)->set_value(value);
}
- PropertyDetails details = PropertyDetails(
- attributes, NORMAL, Representation::None());
+ PropertyDetails details = PropertyDetails(attributes, NORMAL, 0);
Object* result;
{ MaybeObject* maybe_result = dict->Add(name, store_value, details);
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -2005,8 +2027,7 @@
new_enumeration_index = dictionary->DetailsAt(old_index).dictionary_index();
}
- PropertyDetails new_details(
- attributes, NORMAL, Representation::None(), new_enumeration_index);
+ PropertyDetails new_details(attributes, NORMAL, new_enumeration_index);
return SetNormalizedProperty(name, value, new_details);
}
@@ -2071,9 +2092,9 @@
return ReplaceSlowProperty(name, new_value, attributes);
}
+ Representation representation = new_value->OptimalRepresentation();
int index = map()->NextFreePropertyIndex();
- FieldDescriptor new_field(
- name, index, attributes, new_value->OptimalRepresentation(), 0);
+ FieldDescriptor new_field(name, index, attributes, representation);
// Make a new map for the object.
Map* new_map;
@@ -2091,6 +2112,12 @@
if (!maybe_new_properties->To(&new_properties)) return maybe_new_properties;
}
+ Heap* heap = GetHeap();
+ Object* storage;
+ MaybeObject* maybe_storage =
+ new_value->AllocateNewStorageFor(heap, representation);
+ if (!maybe_storage->To(&storage)) return maybe_storage;
+
// Update pointers to commit changes.
// Object points to the new map.
new_map->set_unused_property_fields(new_unused_property_fields);
@@ -2098,7 +2125,8 @@
if (new_properties != NULL) {
set_properties(new_properties);
}
- return FastPropertyAtPut(index, new_value);
+ FastPropertyAtPut(index, new_value);
+ return new_value;
}
@@ -2166,13 +2194,28 @@
}
-bool Map::InstancesNeedRewriting(int target_number_of_fields,
+bool Map::InstancesNeedRewriting(Map* target,
+ int target_number_of_fields,
int target_inobject,
int target_unused) {
// If fields were added (or removed), rewrite the instance.
int number_of_fields = NumberOfFields();
ASSERT(target_number_of_fields >= number_of_fields);
if (target_number_of_fields != number_of_fields) return true;
+
+ if (FLAG_track_double_fields) {
+ // If smi descriptors were replaced by double descriptors, rewrite.
+ DescriptorArray* old_desc = instance_descriptors();
+ DescriptorArray* new_desc = target->instance_descriptors();
+ int limit = NumberOfOwnDescriptors();
+ for (int i = 0; i < limit; i++) {
+ if (new_desc->GetDetails(i).representation().IsDouble() &&
+ old_desc->GetDetails(i).representation().IsSmi()) {
+ return true;
+ }
+ }
+ }
+
// If no fields were added, and no inobject properties were removed, setting
// the map is sufficient.
if (target_inobject == inobject_properties()) return false;
@@ -2212,7 +2255,8 @@
int unused = new_map->unused_property_fields();
// Nothing to do if no functions were converted to fields.
- if (!old_map->InstancesNeedRewriting(number_of_fields, inobject, unused)) {
+ if (!old_map->InstancesNeedRewriting(
+ new_map, number_of_fields, inobject, unused)) {
set_map(new_map);
return this;
}
@@ -2235,7 +2279,21 @@
old_details.type() == FIELD);
Object* value = old_details.type() == CONSTANT_FUNCTION
? old_descriptors->GetValue(i)
- : FastPropertyAt(old_descriptors->GetFieldIndex(i));
+ : RawFastPropertyAt(old_descriptors->GetFieldIndex(i));
+ if (FLAG_track_double_fields &&
+ old_details.representation().IsSmi() &&
+ details.representation().IsDouble()) {
+ // Objects must be allocated in the old object space, since the
+ // overall number of HeapNumbers needed for the conversion might
+ // exceed the capacity of new space, and we would fail repeatedly
+ // trying to migrate the instance.
+ MaybeObject* maybe_storage =
+ value->AllocateNewStorageFor(heap, details.representation(), TENURED);
+ if (!maybe_storage->To(&value)) return maybe_storage;
+ }
+ ASSERT(!(FLAG_track_double_fields &&
+ details.representation().IsDouble() &&
+ value->IsSmi()));
int target_index = new_descriptors->GetFieldIndex(i) - inobject;
if (target_index < 0) target_index += total_size;
array->set(target_index, value);
@@ -2300,6 +2358,10 @@
new_map->instance_descriptors()->InitializeRepresentations(
Representation::Tagged());
+ if (FLAG_trace_generalization) {
+ PrintF("failed generalization %p -> %p\n",
+ static_cast<void*>(this), static_cast<void*>(new_map));
+ }
return new_map;
}
@@ -2460,12 +2522,23 @@
// Check the state of the root map.
DescriptorArray* updated_descriptors = updated->instance_descriptors();
+ int valid = updated->NumberOfOwnDescriptors();
+ if (updated_descriptors->IsMoreGeneralThan(
+ verbatim, valid, descriptors, old_descriptors)) {
+ Representation updated_representation =
+ updated_descriptors->GetDetails(modify_index).representation();
+ if (new_representation.fits_into(updated_representation)) {
+ if (FLAG_trace_generalization) {
+ PrintF("migrating to existing map %p -> %p\n",
+ static_cast<void*>(this), static_cast<void*>(updated));
+ }
+ return updated;
+ }
+ }
+
DescriptorArray* new_descriptors;
MaybeObject* maybe_descriptors = updated_descriptors->Merge(
- verbatim,
- updated->NumberOfOwnDescriptors(),
- descriptors,
- old_descriptors);
+ verbatim, valid, descriptors, old_descriptors);
if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
old_reprepresentation =
@@ -2477,14 +2550,21 @@
verbatim, descriptors, new_descriptors);
int split_descriptors = split_map->NumberOfOwnDescriptors();
- // Check whether |split_map| matches what we were looking for. If so, return
- // it.
- if (descriptors == split_descriptors) return split_map;
+ // This is shadowed by |updated_descriptors| being more general than
+ // |old_descriptors|.
+ ASSERT(descriptors != split_descriptors);
int descriptor = split_descriptors;
split_map->DeprecateTarget(
old_descriptors->GetKey(descriptor), new_descriptors);
+ if (FLAG_trace_generalization) {
+ PrintF("migrating to new map %p -> %p (%i steps)\n",
+ static_cast<void*>(this),
+ static_cast<void*>(new_descriptors),
+ descriptors - descriptor);
+ }
+
Map* new_map = split_map;
// Add missing transitions.
for (; descriptor < descriptors; descriptor++) {
@@ -3027,7 +3107,7 @@
// occur as fields.
if (result->IsField() &&
result->IsReadOnly() &&
- FastPropertyAt(result->GetFieldIndex().field_index())->IsTheHole()) {
+ RawFastPropertyAt(result->GetFieldIndex().field_index())->IsTheHole()) {
result->DisallowCaching();
}
return;
@@ -3460,14 +3540,19 @@
}
-void JSObject::TransitionToMap(Handle<JSObject> object, Handle<Map> map) {
+void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
CALL_HEAP_FUNCTION_VOID(
object->GetIsolate(),
- object->TransitionToMap(*map));
+ object->AllocateStorageForMap(*map));
}
void JSObject::MigrateInstance(Handle<JSObject> object) {
+ if (FLAG_trace_migration) {
+ PrintF("migrating instance %p (%p)\n",
+ static_cast<void*>(*object),
+ static_cast<void*>(object->map()));
+ }
CALL_HEAP_FUNCTION_VOID(
object->GetIsolate(),
object->MigrateInstance());
@@ -3476,10 +3561,10 @@
Handle<Map> Map::GeneralizeRepresentation(Handle<Map> map,
int modify_index,
- Representation new_representation) {
+ Representation representation) {
CALL_HEAP_FUNCTION(
map->GetIsolate(),
- map->GeneralizeRepresentation(modify_index, new_representation),
+ map->GeneralizeRepresentation(modify_index, representation),
Map);
}
@@ -3579,9 +3664,21 @@
lookup->holder()->GeneralizeFieldRepresentation(
lookup->GetDescriptorIndex(), value->OptimalRepresentation());
if (maybe_failure->IsFailure()) return maybe_failure;
+ DescriptorArray* desc = lookup->holder()->map()->instance_descriptors();
+ int descriptor = lookup->GetDescriptorIndex();
+ representation = desc->GetDetails(descriptor).representation();
}
- result = lookup->holder()->FastPropertyAtPut(
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ HeapNumber* storage =
+ HeapNumber::cast(lookup->holder()->RawFastPropertyAt(
+ lookup->GetFieldIndex().field_index()));
+ storage->set_value(value->Number());
+ result = *value;
+ break;
+ }
+ lookup->holder()->FastPropertyAtPut(
lookup->GetFieldIndex().field_index(), *value);
+ result = *value;
break;
}
case CONSTANT_FUNCTION:
@@ -3610,7 +3707,8 @@
if (details.type() == FIELD) {
if (attributes == details.attributes()) {
- if (!value->FitsRepresentation(details.representation())) {
+ Representation representation = details.representation();
+ if (!value->FitsRepresentation(representation)) {
MaybeObject* maybe_map = transition_map->GeneralizeRepresentation(
descriptor, value->OptimalRepresentation());
if (!maybe_map->To(&transition_map)) return maybe_map;
@@ -3620,10 +3718,13 @@
lookup->holder()->MigrateToMap(Map::cast(back));
if (maybe_failure->IsFailure()) return maybe_failure;
}
+ DescriptorArray* desc = transition_map->instance_descriptors();
+ int descriptor = transition_map->LastAdded();
+ representation = desc->GetDetails(descriptor).representation();
}
int field_index = descriptors->GetFieldIndex(descriptor);
result = lookup->holder()->AddFastPropertyUsingMap(
- transition_map, *name, *value, field_index);
+ transition_map, *name, *value, field_index, representation);
} else {
result = lookup->holder()->ConvertDescriptorToField(
*name, *value, attributes);
@@ -3753,8 +3854,7 @@
MaybeObject* result = *value;
switch (lookup.type()) {
case NORMAL: {
- PropertyDetails details = PropertyDetails(
- attributes, NORMAL, Representation::None());
+ PropertyDetails details = PropertyDetails(attributes, NORMAL, 0);
result = self->SetNormalizedProperty(*name, *value, details);
break;
}
@@ -3764,9 +3864,20 @@
MaybeObject* maybe_failure = self->GeneralizeFieldRepresentation(
lookup.GetDescriptorIndex(), value->OptimalRepresentation());
if (maybe_failure->IsFailure()) return maybe_failure;
+ DescriptorArray* desc = self->map()->instance_descriptors();
+ int descriptor = lookup.GetDescriptorIndex();
+ representation = desc->GetDetails(descriptor).representation();
}
- result = self->FastPropertyAtPut(
- lookup.GetFieldIndex().field_index(), *value);
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ HeapNumber* storage =
+ HeapNumber::cast(self->RawFastPropertyAt(
+ lookup.GetFieldIndex().field_index()));
+ storage->set_value(value->Number());
+ result = *value;
+ break;
+ }
+ self->FastPropertyAtPut(lookup.GetFieldIndex().field_index(), *value);
+ result = *value;
break;
}
case CONSTANT_FUNCTION:
@@ -3791,7 +3902,8 @@
if (details.type() == FIELD) {
if (attributes == details.attributes()) {
- if (!value->FitsRepresentation(details.representation())) {
+ Representation representation = details.representation();
+ if (!value->FitsRepresentation(representation)) {
MaybeObject* maybe_map = transition_map->GeneralizeRepresentation(
descriptor, value->OptimalRepresentation());
if (!maybe_map->To(&transition_map)) return maybe_map;
@@ -3800,10 +3912,13 @@
MaybeObject* maybe_failure = self->MigrateToMap(Map::cast(back));
if (maybe_failure->IsFailure()) return maybe_failure;
}
+ DescriptorArray* desc = transition_map->instance_descriptors();
+ int descriptor = transition_map->LastAdded();
+ representation = desc->GetDetails(descriptor).representation();
}
int field_index = descriptors->GetFieldIndex(descriptor);
result = self->AddFastPropertyUsingMap(
- transition_map, *name, *value, field_index);
+ transition_map, *name, *value, field_index, representation);
} else {
result = self->ConvertDescriptorToField(*name, *value, attributes);
}
@@ -4213,10 +4328,8 @@
PropertyDetails details = descs->GetDetails(i);
switch (details.type()) {
case CONSTANT_FUNCTION: {
- PropertyDetails d = PropertyDetails(details.attributes(),
- NORMAL,
- Representation::None(),
- details.descriptor_index());
+ PropertyDetails d = PropertyDetails(
+ details.attributes(), NORMAL, i + 1);
Object* value = descs->GetConstantFunction(i);
MaybeObject* maybe_dictionary =
dictionary->Add(descs->GetKey(i), value, d);
@@ -4224,11 +4337,9 @@
break;
}
case FIELD: {
- PropertyDetails d = PropertyDetails(details.attributes(),
- NORMAL,
- Representation::None(),
- details.descriptor_index());
- Object* value = FastPropertyAt(descs->GetFieldIndex(i));
+ PropertyDetails d =
+ PropertyDetails(details.attributes(), NORMAL, i + 1);
+ Object* value = RawFastPropertyAt(descs->GetFieldIndex(i));
MaybeObject* maybe_dictionary =
dictionary->Add(descs->GetKey(i), value, d);
if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
@@ -4236,10 +4347,8 @@
}
case CALLBACKS: {
Object* value = descs->GetCallbacksObject(i);
- PropertyDetails d = PropertyDetails(details.attributes(),
- CALLBACKS,
- Representation::None(),
- details.descriptor_index());
+ PropertyDetails d = PropertyDetails(
+ details.attributes(), CALLBACKS, i + 1);
MaybeObject* maybe_dictionary =
dictionary->Add(descs->GetKey(i), value, d);
if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
@@ -4375,8 +4484,7 @@
ASSERT(old_map->has_fast_smi_or_object_elements());
value = FixedArray::cast(array)->get(i);
}
- PropertyDetails details = PropertyDetails(
- NONE, NORMAL, Representation::None());
+ PropertyDetails details = PropertyDetails(NONE, NORMAL, 0);
if (!value->IsTheHole()) {
Object* result;
MaybeObject* maybe_result =
@@ -4609,8 +4717,10 @@
if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_string() &&
sorted_index < map()->NumberOfOwnDescriptors()) {
ASSERT(descriptors->GetType(sorted_index) == FIELD);
- inline_value =
- this->FastPropertyAt(descriptors->GetFieldIndex(sorted_index));
+ MaybeObject* maybe_value = this->FastPropertyAt(
+ descriptors->GetDetails(sorted_index).representation(),
+ descriptors->GetFieldIndex(sorted_index));
+ if (!maybe_value->To(&inline_value)) return maybe_value;
} else {
inline_value = GetHeap()->undefined_value();
}
@@ -4679,8 +4789,7 @@
if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_string() &&
sorted_index < map()->NumberOfOwnDescriptors()) {
ASSERT(descriptors->GetType(sorted_index) == FIELD);
- this->FastPropertyAtPut(descriptors->GetFieldIndex(sorted_index),
- value);
+ FastPropertyAtPut(descriptors->GetFieldIndex(sorted_index), value);
return this;
}
}
@@ -5156,6 +5265,11 @@
StackLimitCheck check(isolate);
if (check.HasOverflowed()) return isolate->StackOverflow();
+ if (map()->is_deprecated()) {
+ MaybeObject* maybe_failure = MigrateInstance();
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ }
+
Heap* heap = isolate->heap();
Object* result;
{ MaybeObject* maybe_result = heap->CopyJSObject(this);
@@ -5165,27 +5279,24 @@
// Deep copy local properties.
if (copy->HasFastProperties()) {
- FixedArray* properties = copy->properties();
- for (int i = 0; i < properties->length(); i++) {
- Object* value = properties->get(i);
+ DescriptorArray* descriptors = copy->map()->instance_descriptors();
+ int limit = copy->map()->NumberOfOwnDescriptors();
+ for (int i = 0; i < limit; i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.type() != FIELD) continue;
+ int index = descriptors->GetFieldIndex(i);
+ Object* value = RawFastPropertyAt(index);
if (value->IsJSObject()) {
JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = js_object->DeepCopy(isolate);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- properties->set(i, result);
+ MaybeObject* maybe_copy = js_object->DeepCopy(isolate);
+ if (!maybe_copy->To(&value)) return maybe_copy;
+ } else {
+ Representation representation = details.representation();
+ MaybeObject* maybe_storage =
+ value->AllocateNewStorageFor(heap, representation);
+ if (!maybe_storage->To(&value)) return maybe_storage;
}
- }
- int nof = copy->map()->inobject_properties();
- for (int i = 0; i < nof; i++) {
- Object* value = copy->InObjectPropertyAt(i);
- if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = js_object->DeepCopy(isolate);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- copy->InObjectPropertyAtPut(i, result);
- }
+ copy->FastPropertyAtPut(index, value);
}
} else {
{ MaybeObject* maybe_result =
@@ -5447,8 +5558,7 @@
if (details.attributes() != attributes) {
dictionary->DetailsAtPut(
entry,
- PropertyDetails(
- attributes, CALLBACKS, Representation::None(), index));
+ PropertyDetails(attributes, CALLBACKS, index));
}
AccessorPair::cast(result)->SetComponents(getter, setter);
return true;
@@ -5609,8 +5719,7 @@
MaybeObject* JSObject::SetElementCallback(uint32_t index,
Object* structure,
PropertyAttributes attributes) {
- PropertyDetails details = PropertyDetails(
- attributes, CALLBACKS, Representation::None());
+ PropertyDetails details = PropertyDetails(attributes, CALLBACKS, 0);
// Normalize elements to make this operation simple.
SeededNumberDictionary* dictionary;
@@ -5668,8 +5777,7 @@
}
// Update the dictionary with the new CALLBACKS property.
- PropertyDetails details = PropertyDetails(
- attributes, CALLBACKS, Representation::None());
+ PropertyDetails details = PropertyDetails(attributes, CALLBACKS, 0);
maybe_ok = SetNormalizedProperty(name, structure, details);
if (maybe_ok->IsFailure()) return maybe_ok;
@@ -6013,7 +6121,14 @@
DescriptorArray* descs = map()->instance_descriptors();
for (int i = 0; i < number_of_own_descriptors; i++) {
if (descs->GetType(i) == FIELD) {
- if (FastPropertyAt(descs->GetFieldIndex(i)) == value) {
+ Object* property = RawFastPropertyAt(descs->GetFieldIndex(i));
+ if (FLAG_track_double_fields &&
+ descs->GetDetails(i).representation().IsDouble()) {
+ ASSERT(property->IsHeapNumber());
+ if (value->IsNumber() && property->Number() == value->Number()) {
+ return descs->GetKey(i);
+ }
+ } else if (property == value) {
return descs->GetKey(i);
}
} else if (descs->GetType(i) == CONSTANT_FUNCTION) {
@@ -6043,6 +6158,7 @@
new_bit_field3 = OwnsDescriptors::update(new_bit_field3, true);
new_bit_field3 = NumberOfOwnDescriptorsBits::update(new_bit_field3, 0);
new_bit_field3 = EnumLengthBits::update(new_bit_field3, kInvalidEnumCache);
+ new_bit_field3 = Deprecated::update(new_bit_field3, false);
result->set_bit_field3(new_bit_field3);
return result;
}
@@ -6192,6 +6308,8 @@
set_transitions(transitions);
result->SetBackPointer(this);
+ } else {
+ descriptors->InitializeRepresentations(Representation::Tagged());
}
return result;
@@ -6229,6 +6347,8 @@
set_transitions(transitions);
result->SetBackPointer(this);
+ } else {
+ descriptors->InitializeRepresentations(Representation::Tagged());
}
return result;
@@ -6303,8 +6423,6 @@
descriptors->CopyUpTo(number_of_own_descriptors);
if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
- new_descriptors->InitializeRepresentations(Representation::Tagged());
-
return CopyReplaceDescriptors(new_descriptors, NULL, OMIT_TRANSITION, 0);
}
@@ -6331,7 +6449,6 @@
int old_size = NumberOfOwnDescriptors();
int new_size = old_size + 1;
- descriptor->SetEnumerationIndex(new_size);
if (flag == INSERT_TRANSITION &&
owns_descriptors() &&
@@ -6416,9 +6533,7 @@
int new_size = NumberOfOwnDescriptors();
ASSERT(0 <= insertion_index && insertion_index < new_size);
- PropertyDetails details = descriptors->GetDetails(insertion_index);
- ASSERT_LE(details.descriptor_index(), new_size);
- descriptor->SetEnumerationIndex(details.descriptor_index());
+ ASSERT_LT(insertion_index, new_size);
DescriptorArray* new_descriptors;
MaybeObject* maybe_descriptors = DescriptorArray::Allocate(new_size);
@@ -7260,7 +7375,7 @@
}
-// Generalize the |other| descriptor array by merging it with the (at least
+// Generalize the |other| descriptor array by merging it into the (at least
// partly) updated |this| descriptor array.
// The method merges two descriptor array in three parts. Both descriptor arrays
// are identical up to |verbatim|. They also overlap in keys up to |valid|.
@@ -7280,7 +7395,7 @@
// Allocate a new descriptor array large enough to hold the required
// descriptors, with minimally the exact same size as this descriptor array.
MaybeObject* maybe_descriptors = DescriptorArray::Allocate(
- new_size, Max(new_size, number_of_descriptors()) - new_size);
+ new_size, Max(new_size, other->number_of_descriptors()) - new_size);
if (!maybe_descriptors->To(&result)) return maybe_descriptors;
ASSERT(result->length() > length() ||
result->NumberOfSlackDescriptors() > 0 ||
@@ -7303,7 +7418,6 @@
Name* key = GetKey(descriptor);
PropertyDetails details = GetDetails(descriptor);
PropertyDetails other_details = other->GetDetails(descriptor);
- ASSERT(details.attributes() == other_details.attributes());
if (details.type() == FIELD || other_details.type() == FIELD ||
(details.type() == CONSTANT_FUNCTION &&
@@ -7313,9 +7427,8 @@
details.representation().generalize(other_details.representation());
FieldDescriptor d(key,
current_offset++,
- details.attributes(),
- representation,
- descriptor + 1);
+ other_details.attributes(),
+ representation);
result->Set(descriptor, &d, witness);
} else {
result->CopyFrom(descriptor, other, descriptor, witness);
@@ -7330,8 +7443,7 @@
FieldDescriptor d(key,
current_offset++,
details.attributes(),
- details.representation(),
- descriptor + 1);
+ details.representation());
result->Set(descriptor, &d, witness);
} else {
result->CopyFrom(descriptor, other, descriptor, witness);
@@ -7343,6 +7455,37 @@
}
+// Checks whether a merge of |other| into |this| would return a copy of |this|.
+bool DescriptorArray::IsMoreGeneralThan(int verbatim,
+ int valid,
+ int new_size,
+ DescriptorArray* other) {
+ ASSERT(verbatim <= valid);
+ ASSERT(valid <= new_size);
+ if (valid != new_size) return false;
+
+ for (int descriptor = verbatim; descriptor < valid; descriptor++) {
+ PropertyDetails details = GetDetails(descriptor);
+ PropertyDetails other_details = other->GetDetails(descriptor);
+ if (details.type() != other_details.type()) {
+ if (details.type() != FIELD ||
+ other_details.type() != CONSTANT_FUNCTION) {
+ return false;
+ }
+ } else if (details.type() == CONSTANT_FUNCTION) {
+ if (GetValue(descriptor) != other->GetValue(descriptor)) {
+ return false;
+ }
+ } else if (!other_details.representation().fits_into(
+ details.representation())) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
// We need the whiteness witness since sort will reshuffle the entries in the
// descriptor array. If the descriptor array were to be black, the shuffling
// would move a slot that was already recorded as pointing into an evacuation
@@ -9747,6 +9890,19 @@
}
+Name* Code::FindFirstName() {
+ ASSERT(is_inline_cache_stub());
+ AssertNoAllocation no_allocation;
+ int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(this, mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ Object* object = info->target_object();
+ if (object->IsName()) return Name::cast(object);
+ }
+ return NULL;
+}
+
+
void Code::ClearInlineCaches() {
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::CONSTRUCT_CALL) |
@@ -11152,8 +11308,8 @@
// is read-only (a declared const that has not been initialized). If a
// value is being defined we skip attribute checks completely.
if (set_mode == DEFINE_PROPERTY) {
- details = PropertyDetails(attributes, NORMAL, Representation::None(),
- details.dictionary_index());
+ details = PropertyDetails(
+ attributes, NORMAL, details.dictionary_index());
dictionary->DetailsAtPut(entry, details);
} else if (details.IsReadOnly() && !element->IsTheHole()) {
if (strict_mode == kNonStrictMode) {
@@ -11205,8 +11361,7 @@
}
}
FixedArrayBase* new_dictionary;
- PropertyDetails details = PropertyDetails(
- attributes, NORMAL, Representation::None());
+ PropertyDetails details = PropertyDetails(attributes, NORMAL, 0);
MaybeObject* maybe = dictionary->AddNumberEntry(index, *value, details);
if (!maybe->To(&new_dictionary)) return maybe;
if (*dictionary != SeededNumberDictionary::cast(new_dictionary)) {
@@ -13200,8 +13355,7 @@
}
uint32_t result = pos;
- PropertyDetails no_details = PropertyDetails(
- NONE, NORMAL, Representation::None());
+ PropertyDetails no_details = PropertyDetails(NONE, NORMAL, 0);
Heap* heap = GetHeap();
while (undefs > 0) {
if (pos > static_cast<uint32_t>(Smi::kMaxValue)) {
@@ -13403,6 +13557,33 @@
}
+size_t JSTypedArray::element_size() {
+ switch (elements()->map()->instance_type()) {
+ case EXTERNAL_BYTE_ARRAY_TYPE:
+ return 1;
+ case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
+ return 1;
+ case EXTERNAL_SHORT_ARRAY_TYPE:
+ return 2;
+ case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
+ return 2;
+ case EXTERNAL_INT_ARRAY_TYPE:
+ return 4;
+ case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
+ return 4;
+ case EXTERNAL_FLOAT_ARRAY_TYPE:
+ return 4;
+ case EXTERNAL_DOUBLE_ARRAY_TYPE:
+ return 8;
+ case EXTERNAL_PIXEL_ARRAY_TYPE:
+ return 1;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+
+
Object* ExternalPixelArray::SetValue(uint32_t index, Object* value) {
uint8_t clamped_value = 0;
if (index < static_cast<uint32_t>(length())) {
@@ -13584,7 +13765,7 @@
heap->AllocateJSGlobalPropertyCell(heap->the_hole_value());
if (!maybe_cell->ToObject(&cell)) return maybe_cell;
}
- PropertyDetails details(NONE, NORMAL, Representation::None());
+ PropertyDetails details(NONE, NORMAL, 0);
details = details.AsDeleted();
Object* dictionary;
{ MaybeObject* maybe_dictionary =
@@ -14027,8 +14208,7 @@
int enum_index = Smi::cast(enumeration_order->get(pos++))->value();
PropertyDetails details = DetailsAt(i);
PropertyDetails new_details = PropertyDetails(
- details.attributes(), details.type(),
- Representation::None(), enum_index);
+ details.attributes(), details.type(), enum_index);
DetailsAtPut(i, new_details);
}
}
@@ -14094,8 +14274,7 @@
{ MaybeObject* maybe_k = Shape::AsObject(this->GetHeap(), key);
if (!maybe_k->ToObject(&k)) return maybe_k;
}
- PropertyDetails details = PropertyDetails(
- NONE, NORMAL, Representation::None());
+ PropertyDetails details = PropertyDetails(NONE, NORMAL, 0);
return Dictionary<Shape, Key>::cast(obj)->AddEntry(key, value, details,
Dictionary<Shape, Key>::Hash(key));
@@ -14106,8 +14285,6 @@
MaybeObject* Dictionary<Shape, Key>::Add(Key key,
Object* value,
PropertyDetails details) {
- ASSERT(details.dictionary_index() == details.descriptor_index());
-
// Valdate key is absent.
SLOW_ASSERT((this->FindEntry(key) == Dictionary<Shape, Key>::kNotFound));
// Check whether the dictionary should be extended.
@@ -14141,8 +14318,7 @@
// Assign an enumeration index to the property and update
// SetNextEnumerationIndex.
int index = NextEnumerationIndex();
- details = PropertyDetails(details.attributes(), details.type(),
- Representation::None(), index);
+ details = PropertyDetails(details.attributes(), details.type(), index);
SetNextEnumerationIndex(index + 1);
}
SetEntry(entry, k, value, details);
@@ -14184,7 +14360,7 @@
MaybeObject* UnseededNumberDictionary::AddNumberEntry(uint32_t key,
Object* value) {
SLOW_ASSERT(this->FindEntry(key) == kNotFound);
- return Add(key, value, PropertyDetails(NONE, NORMAL, Representation::None()));
+ return Add(key, value, PropertyDetails(NONE, NORMAL, 0));
}
@@ -14229,7 +14405,6 @@
// Preserve enumeration index.
details = PropertyDetails(details.attributes(),
details.type(),
- Representation::None(),
DetailsAt(entry).dictionary_index());
MaybeObject* maybe_object_key =
SeededNumberDictionaryShape::AsObject(GetHeap(), key);
@@ -14482,15 +14657,13 @@
}
PropertyDetails details = DetailsAt(i);
- ASSERT(details.descriptor_index() == details.dictionary_index());
- int enumeration_index = details.descriptor_index();
+ int enumeration_index = details.dictionary_index();
PropertyType type = details.type();
if (value->IsJSFunction()) {
ConstantFunctionDescriptor d(key,
JSFunction::cast(value),
- details.attributes(),
- enumeration_index);
+ details.attributes());
descriptors->Set(enumeration_index - 1, &d, witness);
} else if (type == NORMAL) {
if (current_offset < inobject_props) {
@@ -14505,14 +14678,12 @@
current_offset++,
details.attributes(),
// TODO(verwaest): value->OptimalRepresentation();
- Representation::Tagged(),
- enumeration_index);
+ Representation::Tagged());
descriptors->Set(enumeration_index - 1, &d, witness);
} else if (type == CALLBACKS) {
CallbacksDescriptor d(key,
value,
- details.attributes(),
- enumeration_index);
+ details.attributes());
descriptors->Set(enumeration_index - 1, &d, witness);
} else {
UNREACHABLE();
diff --git a/src/objects.h b/src/objects.h
index 4b5b51d..1b4ed5b 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1080,6 +1080,10 @@
return true;
}
+ inline MaybeObject* AllocateNewStorageFor(Heap* heap,
+ Representation representation,
+ PretenureFlag tenure = NOT_TENURED);
+
// Returns true if the object is of the correct type to be used as a
// implementation of a JSObject's elements.
inline bool HasValidElements();
@@ -1828,8 +1832,8 @@
// Extend the receiver with a single fast property appeared first in the
// passed map. This also extends the property backing store if necessary.
- static void TransitionToMap(Handle<JSObject> object, Handle<Map> map);
- inline MUST_USE_RESULT MaybeObject* TransitionToMap(Map* map);
+ static void AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map);
+ inline MUST_USE_RESULT MaybeObject* AllocateStorageForMap(Map* map);
static void MigrateInstance(Handle<JSObject> instance);
inline MUST_USE_RESULT MaybeObject* MigrateInstance();
@@ -2135,10 +2139,12 @@
// Add a property to a fast-case object using a map transition to
// new_map.
- MUST_USE_RESULT MaybeObject* AddFastPropertyUsingMap(Map* new_map,
- Name* name,
- Object* value,
- int field_index);
+ MUST_USE_RESULT MaybeObject* AddFastPropertyUsingMap(
+ Map* new_map,
+ Name* name,
+ Object* value,
+ int field_index,
+ Representation representation);
// Add a constant function property to a fast-case object.
// This leaves a CONSTANT_TRANSITION in the old map, and
@@ -2247,8 +2253,11 @@
int unused_property_fields);
// Access fast-case object properties at index.
- inline Object* FastPropertyAt(int index);
- inline Object* FastPropertyAtPut(int index, Object* value);
+ MUST_USE_RESULT inline MaybeObject* FastPropertyAt(
+ Representation representation,
+ int index);
+ inline Object* RawFastPropertyAt(int index);
+ inline void FastPropertyAtPut(int index, Object* value);
// Access to in object properties.
inline int GetInObjectPropertyOffset(int index);
@@ -2809,6 +2818,11 @@
int new_size,
DescriptorArray* other);
+ bool IsMoreGeneralThan(int verbatim,
+ int valid,
+ int new_size,
+ DescriptorArray* other);
+
MUST_USE_RESULT MaybeObject* CopyUpTo(int enumeration_index);
// Sort the instance descriptors by the hash codes of their keys.
@@ -4624,6 +4638,9 @@
Code* FindFirstCode();
void FindAllCode(CodeHandleList* code_list, int length);
+ // Find the first name in an IC stub.
+ Name* FindFirstName();
+
class ExtraICStateStrictMode: public BitField<StrictModeFlag, 0, 1> {};
class ExtraICStateKeyedAccessStoreMode:
public BitField<KeyedAccessStoreMode, 1, 4> {}; // NOLINT
@@ -5191,7 +5208,8 @@
int NumberOfFields();
- bool InstancesNeedRewriting(int target_number_of_fields,
+ bool InstancesNeedRewriting(Map* target,
+ int target_number_of_fields,
int target_inobject,
int target_unused);
static Handle<Map> GeneralizeRepresentation(
@@ -5344,6 +5362,12 @@
inline void deprecate();
inline bool is_deprecated();
inline bool CanBeDeprecated();
+ // Returns a non-deprecated version of the input. If the input was not
+ // deprecated, it is directly returned. Otherwise, the non-deprecated version
+ // is found by re-transitioning from the root of the transition tree using the
+ // descriptor array of the map. New maps (and transitions) may be created if
+ // no new (more general) version exists.
+ static inline Handle<Map> CurrentMapForDeprecated(Handle<Map> map);
MUST_USE_RESULT MaybeObject* RawCopy(int instance_size);
MUST_USE_RESULT MaybeObject* CopyWithPreallocatedFieldDescriptors();
@@ -6396,9 +6420,14 @@
inline int continuation();
inline void set_continuation(int continuation);
- // [operands]: Saved operand stack.
+ // [operand_stack]: Saved operand stack.
DECL_ACCESSORS(operand_stack, FixedArray)
+ // [stack_handler_index]: Index of first stack handler in operand_stack, or -1
+ // if the captured activation had no stack handler.
+ inline int stack_handler_index();
+ inline void set_stack_handler_index(int stack_handler_index);
+
// Casting.
static inline JSGeneratorObject* cast(Object* obj);
@@ -6416,11 +6445,24 @@
static const int kReceiverOffset = kContextOffset + kPointerSize;
static const int kContinuationOffset = kReceiverOffset + kPointerSize;
static const int kOperandStackOffset = kContinuationOffset + kPointerSize;
- static const int kSize = kOperandStackOffset + kPointerSize;
+ static const int kStackHandlerIndexOffset =
+ kOperandStackOffset + kPointerSize;
+ static const int kSize = kStackHandlerIndexOffset + kPointerSize;
// Resume mode, for use by runtime functions.
enum ResumeMode { SEND, THROW };
+ // Yielding from a generator returns an object with the following inobject
+ // properties. See Context::generator_result_map() for the map.
+ static const int kResultValuePropertyIndex = 0;
+ static const int kResultDonePropertyIndex = 1;
+ static const int kResultPropertyCount = 2;
+
+ static const int kResultValuePropertyOffset = JSObject::kHeaderSize;
+ static const int kResultDonePropertyOffset =
+ kResultValuePropertyOffset + kPointerSize;
+ static const int kResultSize = kResultDonePropertyOffset + kPointerSize;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSGeneratorObject);
};
@@ -8708,6 +8750,7 @@
static inline JSTypedArray* cast(Object* obj);
ExternalArrayType type();
+ size_t element_size();
// Dispatched behavior.
DECLARE_PRINTER(JSTypedArray)
diff --git a/src/parser.cc b/src/parser.cc
index 33b5fab..cff51bc 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -3738,33 +3738,6 @@
}
-void Parser::BuildArrayLiteralBoilerplateLiterals(ZoneList<Expression*>* values,
- Handle<FixedArray> literals,
- bool* is_simple,
- int* depth) {
- // Fill in the literals.
- // Accumulate output values in local variables.
- bool is_simple_acc = true;
- int depth_acc = 1;
- for (int i = 0; i < values->length(); i++) {
- MaterializedLiteral* m_literal = values->at(i)->AsMaterializedLiteral();
- if (m_literal != NULL && m_literal->depth() >= depth_acc) {
- depth_acc = m_literal->depth() + 1;
- }
- Handle<Object> boilerplate_value = GetBoilerplateValue(values->at(i));
- if (boilerplate_value->IsUndefined()) {
- literals->set_the_hole(i);
- is_simple_acc = false;
- } else {
- literals->set(i, *boilerplate_value);
- }
- }
-
- *is_simple = is_simple_acc;
- *depth = depth_acc;
-}
-
-
Expression* Parser::ParseArrayLiteral(bool* ok) {
// ArrayLiteral ::
// '[' Expression? (',' Expression?)* ']'
@@ -3991,7 +3964,8 @@
Handle<FixedArray> constant_properties,
bool* is_simple,
bool* fast_elements,
- int* depth) {
+ int* depth,
+ bool* may_store_doubles) {
int position = 0;
// Accumulate the value in local variables and store it at the end.
bool is_simple_acc = true;
@@ -4014,6 +3988,13 @@
// runtime. The enumeration order is maintained.
Handle<Object> key = property->key()->handle();
Handle<Object> value = GetBoilerplateValue(property->value());
+
+ // Ensure objects with doubles are always treated as nested objects.
+ // TODO(verwaest): Remove once we can store them inline.
+ if (FLAG_track_double_fields && value->IsNumber()) {
+ *may_store_doubles = true;
+ }
+
is_simple_acc = is_simple_acc && !value->IsUndefined();
// Keep track of the number of elements in the object literal and
@@ -4215,17 +4196,20 @@
bool is_simple = true;
bool fast_elements = true;
int depth = 1;
+ bool may_store_doubles = false;
BuildObjectLiteralConstantProperties(properties,
constant_properties,
&is_simple,
&fast_elements,
- &depth);
+ &depth,
+ &may_store_doubles);
return factory()->NewObjectLiteral(constant_properties,
properties,
literal_index,
is_simple,
fast_elements,
depth,
+ may_store_doubles,
has_function);
}
diff --git a/src/parser.h b/src/parser.h
index acf47bb..1defbf2 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -692,13 +692,8 @@
Handle<FixedArray> constants,
bool* is_simple,
bool* fast_elements,
- int* depth);
-
- // Populate the literals fixed array for a materialized array literal.
- void BuildArrayLiteralBoilerplateLiterals(ZoneList<Expression*>* properties,
- Handle<FixedArray> constants,
- bool* is_simple,
- int* depth);
+ int* depth,
+ bool* may_store_doubles);
// Decide if a property should be in the object boilerplate.
bool IsBoilerplateProperty(ObjectLiteral::Property* property);
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index 4660c0f..3a1eca7 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -628,11 +628,7 @@
class IndentedScope BASE_EMBEDDED {
public:
- explicit IndentedScope(AstPrinter* printer) : ast_printer_(printer) {
- ast_printer_->inc_indent();
- }
-
- IndentedScope(AstPrinter* printer, const char* txt, AstNode* node = NULL)
+ IndentedScope(AstPrinter* printer, const char* txt)
: ast_printer_(printer) {
ast_printer_->PrintIndented(txt);
ast_printer_->Print("\n");
@@ -693,21 +689,16 @@
}
-void AstPrinter::PrintLabelsIndented(const char* info, ZoneStringList* labels) {
- if (labels != NULL && labels->length() > 0) {
- PrintIndented(info == NULL ? "LABELS" : info);
- Print(" ");
- PrintLabels(labels);
- Print("\n");
- } else if (info != NULL) {
- PrintIndented(info);
- Print("\n");
- }
+void AstPrinter::PrintLabelsIndented(ZoneStringList* labels) {
+ if (labels == NULL || labels->length() == 0) return;
+ PrintIndented("LABELS ");
+ PrintLabels(labels);
+ Print("\n");
}
void AstPrinter::PrintIndentedVisit(const char* s, AstNode* node) {
- IndentedScope indent(this, s, node);
+ IndentedScope indent(this, s);
Visit(node);
}
@@ -779,6 +770,7 @@
}
+// TODO(svenpanne) Start with IndentedScope.
void AstPrinter::VisitVariableDeclaration(VariableDeclaration* node) {
PrintLiteralWithModeIndented(Variable::Mode2String(node->mode()),
node->proxy()->var(),
@@ -786,6 +778,7 @@
}
+// TODO(svenpanne) Start with IndentedScope.
void AstPrinter::VisitFunctionDeclaration(FunctionDeclaration* node) {
PrintIndented("FUNCTION ");
PrintLiteral(node->proxy()->name(), true);
@@ -816,19 +809,21 @@
void AstPrinter::VisitModuleLiteral(ModuleLiteral* node) {
+ IndentedScope indent(this, "MODULE LITERAL");
VisitBlock(node->body());
}
void AstPrinter::VisitModuleVariable(ModuleVariable* node) {
+ IndentedScope indent(this, "MODULE VARIABLE");
Visit(node->proxy());
}
void AstPrinter::VisitModulePath(ModulePath* node) {
- IndentedScope indent(this, "PATH");
- PrintIndentedVisit("MODULE", node->module());
- PrintLiteralIndented("NAME", node->name(), false);
+ IndentedScope indent(this, "MODULE PATH");
+ PrintIndentedVisit("MODULE PATH PARENT", node->module());
+ PrintLiteralIndented("NAME", node->name(), true);
}
@@ -838,24 +833,26 @@
void AstPrinter::VisitModuleStatement(ModuleStatement* node) {
- IndentedScope indent(this, "MODULE");
+ IndentedScope indent(this, "MODULE STATEMENT");
PrintLiteralIndented("NAME", node->proxy()->name(), true);
PrintStatements(node->body()->statements());
}
void AstPrinter::VisitExpressionStatement(ExpressionStatement* node) {
+ IndentedScope indent(this, "EXPRESSION STATEMENT");
Visit(node->expression());
}
void AstPrinter::VisitEmptyStatement(EmptyStatement* node) {
- PrintIndented("EMPTY\n");
+ IndentedScope indent(this, "EMPTY");
}
void AstPrinter::VisitIfStatement(IfStatement* node) {
- PrintIndentedVisit("IF", node->condition());
+ IndentedScope indent(this, "IF");
+ PrintIndentedVisit("CONDITION", node->condition());
PrintIndentedVisit("THEN", node->then_statement());
if (node->HasElseStatement()) {
PrintIndentedVisit("ELSE", node->else_statement());
@@ -864,17 +861,20 @@
void AstPrinter::VisitContinueStatement(ContinueStatement* node) {
- PrintLabelsIndented("CONTINUE", node->target()->labels());
+ IndentedScope indent(this, "CONTINUE");
+ PrintLabelsIndented(node->target()->labels());
}
void AstPrinter::VisitBreakStatement(BreakStatement* node) {
- PrintLabelsIndented("BREAK", node->target()->labels());
+ IndentedScope indent(this, "BREAK");
+ PrintLabelsIndented(node->target()->labels());
}
void AstPrinter::VisitReturnStatement(ReturnStatement* node) {
- PrintIndentedVisit("RETURN", node->expression());
+ IndentedScope indent(this, "RETURN");
+ Visit(node->expression());
}
@@ -887,7 +887,7 @@
void AstPrinter::VisitSwitchStatement(SwitchStatement* node) {
IndentedScope indent(this, "SWITCH");
- PrintLabelsIndented(NULL, node->labels());
+ PrintLabelsIndented(node->labels());
PrintIndentedVisit("TAG", node->tag());
for (int i = 0; i < node->cases()->length(); i++) {
PrintCaseClause(node->cases()->at(i));
@@ -897,7 +897,7 @@
void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
IndentedScope indent(this, "DO");
- PrintLabelsIndented(NULL, node->labels());
+ PrintLabelsIndented(node->labels());
PrintIndentedVisit("BODY", node->body());
PrintIndentedVisit("COND", node->cond());
}
@@ -905,7 +905,7 @@
void AstPrinter::VisitWhileStatement(WhileStatement* node) {
IndentedScope indent(this, "WHILE");
- PrintLabelsIndented(NULL, node->labels());
+ PrintLabelsIndented(node->labels());
PrintIndentedVisit("COND", node->cond());
PrintIndentedVisit("BODY", node->body());
}
@@ -913,7 +913,7 @@
void AstPrinter::VisitForStatement(ForStatement* node) {
IndentedScope indent(this, "FOR");
- PrintLabelsIndented(NULL, node->labels());
+ PrintLabelsIndented(node->labels());
if (node->init()) PrintIndentedVisit("INIT", node->init());
if (node->cond()) PrintIndentedVisit("COND", node->cond());
PrintIndentedVisit("BODY", node->body());
@@ -972,12 +972,13 @@
void AstPrinter::VisitConditional(Conditional* node) {
IndentedScope indent(this, "CONDITIONAL");
- PrintIndentedVisit("?", node->condition());
+ PrintIndentedVisit("CONDITION", node->condition());
PrintIndentedVisit("THEN", node->then_expression());
PrintIndentedVisit("ELSE", node->else_expression());
}
+// TODO(svenpanne) Start with IndentedScope.
void AstPrinter::VisitLiteral(Literal* node) {
PrintLiteralIndented("LITERAL", node->handle(), true);
}
@@ -1034,6 +1035,7 @@
}
+// TODO(svenpanne) Start with IndentedScope.
void AstPrinter::VisitVariableProxy(VariableProxy* node) {
Variable* var = node->var();
EmbeddedVector<char, 128> buf;
@@ -1059,24 +1061,26 @@
void AstPrinter::VisitAssignment(Assignment* node) {
- IndentedScope indent(this, Token::Name(node->op()), node);
+ IndentedScope indent(this, Token::Name(node->op()));
Visit(node->target());
Visit(node->value());
}
void AstPrinter::VisitYield(Yield* node) {
- PrintIndentedVisit("YIELD", node->expression());
+ IndentedScope indent(this, "YIELD");
+ Visit(node->expression());
}
void AstPrinter::VisitThrow(Throw* node) {
- PrintIndentedVisit("THROW", node->exception());
+ IndentedScope indent(this, "THROW");
+ Visit(node->exception());
}
void AstPrinter::VisitProperty(Property* node) {
- IndentedScope indent(this, "PROPERTY", node);
+ IndentedScope indent(this, "PROPERTY");
Visit(node->obj());
Literal* literal = node->key()->AsLiteral();
if (literal != NULL && literal->handle()->IsInternalizedString()) {
@@ -1102,14 +1106,15 @@
void AstPrinter::VisitCallRuntime(CallRuntime* node) {
- PrintLiteralIndented("CALL RUNTIME ", node->name(), false);
- IndentedScope indent(this);
+ IndentedScope indent(this, "CALL RUNTIME");
+ PrintLiteralIndented("NAME", node->name(), false);
PrintArguments(node->arguments());
}
void AstPrinter::VisitUnaryOperation(UnaryOperation* node) {
- PrintIndentedVisit(Token::Name(node->op()), node->expression());
+ IndentedScope indent(this, Token::Name(node->op()));
+ Visit(node->expression());
}
@@ -1117,19 +1122,20 @@
EmbeddedVector<char, 128> buf;
OS::SNPrintF(buf, "%s %s", (node->is_prefix() ? "PRE" : "POST"),
Token::Name(node->op()));
- PrintIndentedVisit(buf.start(), node->expression());
+ IndentedScope indent(this, buf.start());
+ Visit(node->expression());
}
void AstPrinter::VisitBinaryOperation(BinaryOperation* node) {
- IndentedScope indent(this, Token::Name(node->op()), node);
+ IndentedScope indent(this, Token::Name(node->op()));
Visit(node->left());
Visit(node->right());
}
void AstPrinter::VisitCompareOperation(CompareOperation* node) {
- IndentedScope indent(this, Token::Name(node->op()), node);
+ IndentedScope indent(this, Token::Name(node->op()));
Visit(node->left());
Visit(node->right());
}
diff --git a/src/prettyprinter.h b/src/prettyprinter.h
index 41175ab..6657ecd 100644
--- a/src/prettyprinter.h
+++ b/src/prettyprinter.h
@@ -106,7 +106,7 @@
void PrintLiteralWithModeIndented(const char* info,
Variable* var,
Handle<Object> value);
- void PrintLabelsIndented(const char* info, ZoneStringList* labels);
+ void PrintLabelsIndented(ZoneStringList* labels);
void inc_indent() { indent_++; }
void dec_indent() { indent_--; }
diff --git a/src/property-details.h b/src/property-details.h
index dc912c8..62140fe 100644
--- a/src/property-details.h
+++ b/src/property-details.h
@@ -99,16 +99,25 @@
static Representation FromKind(Kind kind) { return Representation(kind); }
- bool Equals(const Representation& other) {
+ bool Equals(const Representation& other) const {
return kind_ == other.kind_;
}
- bool is_more_general_than(const Representation& other) {
+ bool IsCompatibleForLoad(const Representation& other) const {
+ return (IsDouble() && other.IsDouble()) ||
+ (!IsDouble() && !other.IsDouble());
+ }
+
+ bool is_more_general_than(const Representation& other) const {
ASSERT(kind_ != kExternal);
ASSERT(other.kind_ != kExternal);
return kind_ > other.kind_;
}
+ bool fits_into(const Representation& other) const {
+ return other.is_more_general_than(*this) || other.Equals(*this);
+ }
+
Representation generalize(Representation other) {
if (is_more_general_than(other)) {
return *this;
@@ -145,20 +154,21 @@
public:
PropertyDetails(PropertyAttributes attributes,
PropertyType type,
- Representation representation,
- int index = 0) {
+ int index) {
value_ = TypeField::encode(type)
| AttributesField::encode(attributes)
- | RepresentationField::encode(EncodeRepresentation(representation))
| DictionaryStorageField::encode(index);
ASSERT(type == this->type());
ASSERT(attributes == this->attributes());
- if (representation.IsNone()) {
- ASSERT(index == this->dictionary_index());
- } else {
- ASSERT(index == this->descriptor_index());
- }
+ }
+
+ PropertyDetails(PropertyAttributes attributes,
+ PropertyType type,
+ Representation representation) {
+ value_ = TypeField::encode(type)
+ | AttributesField::encode(attributes)
+ | RepresentationField::encode(EncodeRepresentation(representation));
}
int pointer() { return DescriptorPointer::decode(value_); }
@@ -174,17 +184,10 @@
inline Smi* AsSmi();
static uint8_t EncodeRepresentation(Representation representation) {
- ASSERT(representation.kind() <= Representation::kTagged);
- if (representation.kind() < Representation::kInteger32) {
- return representation.kind();
- } else {
- return representation.kind() - 1;
- }
+ return representation.kind();
}
static Representation DecodeRepresentation(uint32_t bits) {
- ASSERT(bits <= Representation::kTagged);
- if (bits >= Representation::kInteger32) bits += 1;
return Representation::FromKind(static_cast<Representation::Kind>(bits));
}
@@ -198,10 +201,6 @@
return DictionaryStorageField::decode(value_);
}
- int descriptor_index() {
- return DescriptorStorageField::decode(value_);
- }
-
Representation representation() {
return DecodeRepresentation(RepresentationField::decode(value_));
}
@@ -223,9 +222,8 @@
class AttributesField: public BitField<PropertyAttributes, 3, 3> {};
class DeletedField: public BitField<uint32_t, 6, 1> {};
class DictionaryStorageField: public BitField<uint32_t, 7, 24> {};
- class DescriptorStorageField: public BitField<uint32_t, 7, 11> {};
- class DescriptorPointer: public BitField<uint32_t, 18, 11> {};
- class RepresentationField: public BitField<uint32_t, 29, 2> {};
+ class DescriptorPointer: public BitField<uint32_t, 7, 11> {};
+ class RepresentationField: public BitField<uint32_t, 18, 3> {};
static const int kInitialIndex = 1;
diff --git a/src/property.cc b/src/property.cc
index cbf2fc8..80a06cb 100644
--- a/src/property.cc
+++ b/src/property.cc
@@ -112,7 +112,6 @@
GetKey()->ShortPrint(out);
PrintF(out, " @ ");
GetValue()->ShortPrint(out);
- PrintF(out, " %d\n", GetDetails().descriptor_index());
}
diff --git a/src/property.h b/src/property.h
index 2292419..606f111 100644
--- a/src/property.h
+++ b/src/property.h
@@ -64,11 +64,6 @@
void Print(FILE* out);
#endif
- void SetEnumerationIndex(int index) {
- details_ = PropertyDetails(details_.attributes(), details_.type(),
- details_.representation(), index);
- }
-
void SetSortedKeyIndex(int index) { details_ = details_.set_pointer(index); }
private:
@@ -94,11 +89,10 @@
Object* value,
PropertyAttributes attributes,
PropertyType type,
- Representation representation,
- int index)
+ Representation representation)
: key_(key),
value_(value),
- details_(attributes, type, representation, index) { }
+ details_(attributes, type, representation) { }
friend class DescriptorArray;
};
@@ -109,10 +103,9 @@
FieldDescriptor(Name* key,
int field_index,
PropertyAttributes attributes,
- Representation representation,
- int index = 0)
+ Representation representation)
: Descriptor(key, Smi::FromInt(field_index), attributes,
- FIELD, representation, index) {}
+ FIELD, representation) {}
};
@@ -120,10 +113,9 @@
public:
ConstantFunctionDescriptor(Name* key,
JSFunction* function,
- PropertyAttributes attributes,
- int index)
- : Descriptor(key, function, attributes,
- CONSTANT_FUNCTION, Representation::Tagged(), index) {}
+ PropertyAttributes attributes)
+ : Descriptor(key, function, attributes, CONSTANT_FUNCTION,
+ Representation::Tagged()) {}
};
@@ -131,10 +123,9 @@
public:
CallbacksDescriptor(Name* key,
Object* foreign,
- PropertyAttributes attributes,
- int index = 0)
+ PropertyAttributes attributes)
: Descriptor(key, foreign, attributes, CALLBACKS,
- Representation::Tagged(), index) {}
+ Representation::Tagged()) {}
};
@@ -268,6 +259,8 @@
Representation representation() {
ASSERT(IsFound());
+ ASSERT(!IsTransition());
+ ASSERT(details_.type() != NONEXISTENT);
return details_.representation();
}
@@ -355,7 +348,7 @@
Object* GetLazyValue() {
switch (type()) {
case FIELD:
- return holder()->FastPropertyAt(GetFieldIndex().field_index());
+ return holder()->RawFastPropertyAt(GetFieldIndex().field_index());
case NORMAL: {
Object* value;
value = holder()->property_dictionary()->ValueAt(GetDictionaryEntry());
diff --git a/src/runtime.cc b/src/runtime.cc
index 6be77a9..4e00b29 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -28,6 +28,8 @@
#include <stdlib.h>
#include <limits>
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+
#include "v8.h"
#include "accessors.h"
@@ -232,7 +234,9 @@
constant_properties,
&is_result_from_cache);
- Handle<JSObject> boilerplate = isolate->factory()->NewJSObjectFromMap(map);
+ Handle<JSObject> boilerplate =
+ isolate->factory()->NewJSObjectFromMap(
+ map, isolate->heap()->GetPretenureMode());
// Normalize the elements of the boilerplate to save space if needed.
if (!should_have_fast_elements) JSObject::NormalizeElements(boilerplate);
@@ -336,8 +340,10 @@
// Create the JSArray.
Handle<JSFunction> constructor(
JSFunction::NativeContextFromLiterals(*literals)->array_function());
- Handle<JSArray> object =
- Handle<JSArray>::cast(isolate->factory()->NewJSObject(constructor));
+
+ Handle<JSArray> object = Handle<JSArray>::cast(
+ isolate->factory()->NewJSObject(
+ constructor, isolate->heap()->GetPretenureMode()));
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(elements->get(0))->value());
@@ -647,11 +653,11 @@
static void ArrayBufferWeakCallback(v8::Isolate* external_isolate,
- Persistent<Value> object,
+ Persistent<Value>* object,
void* data) {
Isolate* isolate = reinterpret_cast<Isolate*>(external_isolate);
HandleScope scope(isolate);
- Handle<Object> internal_object = Utils::OpenHandle(*object);
+ Handle<Object> internal_object = Utils::OpenHandle(**object);
size_t allocated_length = NumberToSize(
isolate, JSArrayBuffer::cast(*internal_object)->byte_length());
@@ -659,7 +665,7 @@
-static_cast<intptr_t>(allocated_length));
if (data != NULL)
free(data);
- object.Dispose(external_isolate);
+ object->Dispose(external_isolate);
}
@@ -774,7 +780,8 @@
ARRAY_ID_UINT32 = 5,
ARRAY_ID_INT32 = 6,
ARRAY_ID_FLOAT32 = 7,
- ARRAY_ID_FLOAT64 = 8
+ ARRAY_ID_FLOAT64 = 8,
+ ARRAY_ID_UINT8C = 9
};
@@ -831,6 +838,11 @@
arrayType = kExternalDoubleArray;
elementSize = 8;
break;
+ case ARRAY_ID_UINT8C:
+ elementsKind = EXTERNAL_PIXEL_ELEMENTS;
+ arrayType = kExternalPixelArray;
+ elementSize = 1;
+ break;
default:
UNREACHABLE();
return NULL;
@@ -861,11 +873,15 @@
#define TYPED_ARRAY_GETTER(getter, accessor) \
- RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayGet##getter) { \
- HandleScope scope(isolate); \
- ASSERT(args.length() == 1); \
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0); \
- return holder->accessor(); \
+ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayGet##getter) { \
+ HandleScope scope(isolate); \
+ ASSERT(args.length() == 1); \
+ CONVERT_ARG_HANDLE_CHECKED(Object, holder, 0); \
+ if (!holder->IsJSTypedArray()) \
+ return isolate->Throw(*isolate->factory()->NewTypeError( \
+ "not_typed_array", HandleVector<Object>(NULL, 0))); \
+ Handle<JSTypedArray> typed_array(JSTypedArray::cast(*holder)); \
+ return typed_array->accessor(); \
}
TYPED_ARRAY_GETTER(Buffer, buffer)
@@ -875,6 +891,128 @@
#undef TYPED_ARRAY_GETTER
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArraySetFastCases) {
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(Object, target_obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, source_obj, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, offset_obj, 2);
+
+ if (!target_obj->IsJSTypedArray())
+ return isolate->Throw(*isolate->factory()->NewTypeError(
+ "not_typed_array", HandleVector<Object>(NULL, 0)));
+
+ if (!source_obj->IsJSTypedArray())
+ return isolate->heap()->false_value();
+
+ Handle<JSTypedArray> target(JSTypedArray::cast(*target_obj));
+ Handle<JSTypedArray> source(JSTypedArray::cast(*source_obj));
+ size_t offset = NumberToSize(isolate, *offset_obj);
+ size_t target_length = NumberToSize(isolate, target->length());
+ size_t source_length = NumberToSize(isolate, source->length());
+ size_t target_byte_length = NumberToSize(isolate, target->byte_length());
+ size_t source_byte_length = NumberToSize(isolate, source->byte_length());
+ if (offset > target_length ||
+ offset + source_length > target_length ||
+ offset + source_length < offset) // overflow
+ return isolate->Throw(*isolate->factory()->NewRangeError(
+ "typed_array_set_source_too_large", HandleVector<Object>(NULL, 0)));
+
+ Handle<JSArrayBuffer> target_buffer(JSArrayBuffer::cast(target->buffer()));
+ Handle<JSArrayBuffer> source_buffer(JSArrayBuffer::cast(source->buffer()));
+ size_t target_offset = NumberToSize(isolate, target->byte_offset());
+ size_t source_offset = NumberToSize(isolate, source->byte_offset());
+ uint8_t* target_base =
+ static_cast<uint8_t*>(target_buffer->backing_store()) + target_offset;
+ uint8_t* source_base =
+ static_cast<uint8_t*>(source_buffer->backing_store()) + source_offset;
+
+ // Typed arrays of the same type: use memmove.
+ if (target->type() == source->type()) {
+ memmove(target_base + offset * target->element_size(),
+ source_base, source_byte_length);
+ return isolate->heap()->true_value();
+ }
+
+ // Typed arrays of different types over the same backing store
+ if ((source_base <= target_base &&
+ source_base + source_byte_length > target_base) ||
+ (target_base <= source_base &&
+ target_base + target_byte_length > source_base)) {
+ size_t target_element_size = target->element_size();
+ size_t source_element_size = source->element_size();
+
+ size_t source_length = NumberToSize(isolate, source->length());
+
+ // Copy left part
+ size_t left_index;
+ {
+ // First unmutated byte after the next write
+ uint8_t* target_ptr = target_base + (offset + 1) * target_element_size;
+ // Next read at source_ptr. We do not care about memory changing before
+ // source_ptr - we have already copied it.
+ uint8_t* source_ptr = source_base;
+ for (left_index = 0;
+ left_index < source_length && target_ptr <= source_ptr;
+ left_index++) {
+ Handle<Object> v = Object::GetElement(
+ source, static_cast<uint32_t>(left_index));
+ JSObject::SetElement(
+ target, static_cast<uint32_t>(offset + left_index), v,
+ NONE, kNonStrictMode);
+ target_ptr += target_element_size;
+ source_ptr += source_element_size;
+ }
+ }
+ // Copy right part
+ size_t right_index;
+ {
+ // First unmutated byte before the next write
+ uint8_t* target_ptr =
+ target_base + (offset + source_length - 1) * target_element_size;
+ // Next read before source_ptr. We do not care about memory changing after
+ // source_ptr - we have already copied it.
+ uint8_t* source_ptr =
+ source_base + source_length * source_element_size;
+ for (right_index = source_length - 1;
+ right_index >= left_index && target_ptr >= source_ptr;
+ right_index--) {
+ Handle<Object> v = Object::GetElement(
+ source, static_cast<uint32_t>(right_index));
+ JSObject::SetElement(
+ target, static_cast<uint32_t>(offset + right_index), v,
+ NONE, kNonStrictMode);
+ target_ptr -= target_element_size;
+ source_ptr -= source_element_size;
+ }
+ }
+ // There can be at most 8 entries left in the middle that need buffering
+ // (because the largest element_size is 8 times the smallest).
+ ASSERT((right_index + 1) - left_index <= 8);
+ Handle<Object> temp[8];
+ size_t idx;
+ for (idx = left_index; idx <= right_index; idx++) {
+ temp[idx - left_index] = Object::GetElement(
+ source, static_cast<uint32_t>(idx));
+ }
+ for (idx = left_index; idx <= right_index; idx++) {
+ JSObject::SetElement(
+ target, static_cast<uint32_t>(offset + idx), temp[idx-left_index],
+ NONE, kNonStrictMode);
+ }
+ } else { // Non-overlapping typed arrays
+ for (size_t idx = 0; idx < source_length; idx++) {
+ Handle<Object> value = Object::GetElement(
+ source, static_cast<uint32_t>(idx));
+ JSObject::SetElement(
+ target, static_cast<uint32_t>(offset + idx), value,
+ NONE, kNonStrictMode);
+ }
+ }
+
+ return isolate->heap()->true_value();
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetInitialize) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -2181,6 +2319,14 @@
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsGenerator) {
+ NoHandleAllocation ha(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSFunction, f, 0);
+ return isolate->heap()->ToBoolean(f->shared()->is_generator());
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionRemovePrototype) {
NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
@@ -2292,8 +2438,7 @@
CallbacksDescriptor new_desc(name,
instance_desc->GetValue(index),
- static_cast<PropertyAttributes>(details.attributes() | READ_ONLY),
- details.descriptor_index());
+ static_cast<PropertyAttributes>(details.attributes() | READ_ONLY));
// Create a new map featuring the new field descriptors array.
Map* new_map;
@@ -2311,7 +2456,6 @@
PropertyDetails new_details(
static_cast<PropertyAttributes>(details.attributes() | READ_ONLY),
details.type(),
- Representation::None(),
details.dictionary_index());
function->property_dictionary()->DetailsAtPut(entry, new_details);
}
@@ -2433,6 +2577,7 @@
generator->set_receiver(frame->receiver());
generator->set_continuation(0);
generator->set_operand_stack(isolate->heap()->empty_fixed_array());
+ generator->set_stack_handler_index(-1);
return generator;
}
@@ -2459,23 +2604,18 @@
if (operands_count == 0) {
ASSERT_EQ(generator_object->operand_stack(),
isolate->heap()->empty_fixed_array());
+ ASSERT_EQ(generator_object->stack_handler_index(), -1);
// If there are no operands on the stack, there shouldn't be a handler
// active either.
ASSERT(!frame->HasHandler());
} else {
- if (frame->HasHandler()) {
- // TODO(wingo): Unwind the stack handlers.
- UNIMPLEMENTED();
- }
-
- FixedArray* operand_stack;
+ int stack_handler_index = -1;
MaybeObject* alloc = isolate->heap()->AllocateFixedArray(operands_count);
+ FixedArray* operand_stack;
if (!alloc->To(&operand_stack)) return alloc;
-
- for (int i = 0; i < operands_count; i++) {
- operand_stack->set(i, frame->GetOperand(i));
- }
+ frame->SaveOperandStack(operand_stack, &stack_handler_index);
generator_object->set_operand_stack(operand_stack);
+ generator_object->set_stack_handler_index(stack_handler_index);
}
// Set continuation down here to avoid side effects if the operand stack
@@ -2525,14 +2665,10 @@
FixedArray* operand_stack = generator_object->operand_stack();
int operands_count = operand_stack->length();
if (operands_count != 0) {
- // TODO(wingo): Rewind stack handlers. However until
- // SuspendJSGeneratorObject unwinds them, we won't see frames with stack
- // handlers here.
- for (int i = 0; i < operands_count; i++) {
- ASSERT_EQ(frame->GetOperand(i), isolate->heap()->the_hole_value());
- Memory::Object_at(frame->GetOperandSlot(i)) = operand_stack->get(i);
- }
+ frame->RestoreOperandStack(operand_stack,
+ generator_object->stack_handler_index());
generator_object->set_operand_stack(isolate->heap()->empty_fixed_array());
+ generator_object->set_stack_handler_index(-1);
}
JSGeneratorObject::ResumeMode resume_mode =
@@ -4337,7 +4473,8 @@
KeyedLookupCache* keyed_lookup_cache = isolate->keyed_lookup_cache();
int offset = keyed_lookup_cache->Lookup(receiver_map, key);
if (offset != -1) {
- Object* value = receiver->FastPropertyAt(offset);
+ // Doubles are not cached, so read the raw value.
+ Object* value = receiver->RawFastPropertyAt(offset);
return value->IsTheHole()
? isolate->heap()->undefined_value()
: value;
@@ -4348,8 +4485,13 @@
receiver->LocalLookup(key, &result);
if (result.IsField()) {
int offset = result.GetFieldIndex().field_index();
- keyed_lookup_cache->Update(receiver_map, key, offset);
- return receiver->FastPropertyAt(offset);
+ // Do not track double fields in the keyed lookup cache. Reading
+ // double values requires boxing.
+ if (!FLAG_track_double_fields ||
+ !result.representation().IsDouble()) {
+ keyed_lookup_cache->Update(receiver_map, key, offset);
+ }
+ return receiver->FastPropertyAt(result.representation(), offset);
}
} else {
// Attempt dictionary lookup.
@@ -4525,6 +4667,7 @@
return lookup.holder()->GetNormalizedProperty(&lookup);
case FIELD:
return lookup.holder()->FastPropertyAt(
+ lookup.representation(),
lookup.GetFieldIndex().field_index());
case CONSTANT_FUNCTION:
return lookup.GetConstantFunction();
@@ -6123,7 +6266,8 @@
Object* number = args[0];
RUNTIME_ASSERT(number->IsNumber());
- return isolate->heap()->NumberToString(number, false);
+ return isolate->heap()->NumberToString(
+ number, false, isolate->heap()->GetPretenureMode());
}
@@ -10040,14 +10184,18 @@
return heap->undefined_value();
}
return value;
- case FIELD:
- value =
+ case FIELD: {
+ Object* value;
+ MaybeObject* maybe_value =
JSObject::cast(result->holder())->FastPropertyAt(
+ result->representation(),
result->GetFieldIndex().field_index());
+ if (!maybe_value->To(&value)) return maybe_value;
if (value->IsTheHole()) {
return heap->undefined_value();
}
return value;
+ }
case CONSTANT_FUNCTION:
return result->GetConstantFunction();
case CALLBACKS: {
diff --git a/src/runtime.h b/src/runtime.h
index 2a102e1..c91fee6 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -231,6 +231,7 @@
F(FunctionSetName, 2, 1) \
F(FunctionNameShouldPrintAsAnonymous, 1, 1) \
F(FunctionMarkNameShouldPrintAsAnonymous, 1, 1) \
+ F(FunctionIsGenerator, 1, 1) \
F(FunctionBindArguments, 4, 1) \
F(BoundFunctionGetBindings, 1, 1) \
F(FunctionRemovePrototype, 1, 1) \
@@ -360,6 +361,7 @@
F(TypedArrayGetByteLength, 1, 1) \
F(TypedArrayGetByteOffset, 1, 1) \
F(TypedArrayGetLength, 1, 1) \
+ F(TypedArraySetFastCases, 3, 1) \
\
/* Statements */ \
F(NewClosure, 3, 1) \
diff --git a/src/serialize.cc b/src/serialize.cc
index d4f31c1..3e70edc 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -558,6 +558,11 @@
UNCLASSIFIED,
58,
"Runtime::AllocateInOldPointerSpace");
+ Add(ExternalReference::new_space_high_promotion_mode_active_address(isolate).
+ address(),
+ UNCLASSIFIED,
+ 59,
+ "Heap::NewSpaceAllocationLimitAddress");
// Add a small set of deopt entry addresses to encoder without generating the
// deopt table code, which isn't possible at deserialization time.
@@ -568,7 +573,7 @@
entry,
Deoptimizer::LAZY,
Deoptimizer::CALCULATE_ENTRY_ADDRESS);
- Add(address, LAZY_DEOPTIMIZATION, 59 + entry, "lazy_deopt");
+ Add(address, LAZY_DEOPTIMIZATION, 60 + entry, "lazy_deopt");
}
}
diff --git a/src/spaces.cc b/src/spaces.cc
index df1c3ef..099ad93 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -2304,14 +2304,6 @@
// Don't free list allocate if there is linear space available.
ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
- int new_node_size = 0;
- FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
- if (new_node == NULL) return NULL;
-
-
- int bytes_left = new_node_size - size_in_bytes;
- ASSERT(bytes_left >= 0);
-
int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
// Mark the old linear allocation area with a free space map so it can be
// skipped when scanning the heap. This also puts it back in the free list
@@ -2321,6 +2313,16 @@
owner_->heap()->incremental_marking()->OldSpaceStep(
size_in_bytes - old_linear_size);
+ int new_node_size = 0;
+ FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
+ if (new_node == NULL) {
+ owner_->SetTop(NULL, NULL);
+ return NULL;
+ }
+
+ int bytes_left = new_node_size - size_in_bytes;
+ ASSERT(bytes_left >= 0);
+
#ifdef DEBUG
for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
reinterpret_cast<Object**>(new_node->address())[i] =
diff --git a/src/string-stream.cc b/src/string-stream.cc
index 61eec0d..ebe1b5b 100644
--- a/src/string-stream.cc
+++ b/src/string-stream.cc
@@ -350,9 +350,8 @@
}
int real_size = map->NumberOfOwnDescriptors();
DescriptorArray* descs = map->instance_descriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
+ for (int i = 0; i < real_size; i++) {
PropertyDetails details = descs->GetDetails(i);
- if (details.descriptor_index() > real_size) continue;
if (details.type() == FIELD) {
Object* key = descs->GetKey(i);
if (key->IsString() || key->IsNumber()) {
@@ -368,7 +367,7 @@
key->ShortPrint();
}
Add(": ");
- Object* value = js_object->FastPropertyAt(descs->GetFieldIndex(i));
+ Object* value = js_object->RawFastPropertyAt(descs->GetFieldIndex(i));
Add("%o\n", value);
}
}
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 6374877..f43c9ac 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -221,11 +221,12 @@
Handle<Code> StubCache::ComputeLoadField(Handle<Name> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- PropertyIndex field) {
+ PropertyIndex field,
+ Representation representation) {
if (receiver.is_identical_to(holder)) {
- LoadFieldStub stub(LoadStubCompiler::receiver(),
- field.is_inobject(holder),
- field.translate(holder));
+ LoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
return stub.GetCode(isolate());
}
@@ -236,7 +237,7 @@
LoadStubCompiler compiler(isolate_);
Handle<Code> handler =
- compiler.CompileLoadField(receiver, holder, name, field);
+ compiler.CompileLoadField(receiver, holder, name, field, representation);
JSObject::UpdateMapCodeCache(stub_holder, name, handler);
return handler;
}
@@ -337,11 +338,12 @@
Handle<Code> StubCache::ComputeKeyedLoadField(Handle<Name> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- PropertyIndex field) {
+ PropertyIndex field,
+ Representation representation) {
if (receiver.is_identical_to(holder)) {
- LoadFieldStub stub(KeyedLoadStubCompiler::receiver(),
- field.is_inobject(holder),
- field.translate(holder));
+ KeyedLoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
return stub.GetCode(isolate());
}
@@ -352,7 +354,7 @@
KeyedLoadStubCompiler compiler(isolate_);
Handle<Code> handler =
- compiler.CompileLoadField(receiver, holder, name, field);
+ compiler.CompileLoadField(receiver, holder, name, field, representation);
JSObject::UpdateMapCodeCache(stub_holder, name, handler);
return handler;
}
@@ -431,15 +433,7 @@
StrictModeFlag strict_mode) {
Handle<Code> stub = FindIC(
name, receiver, Code::STORE_IC, Code::MAP_TRANSITION, strict_mode);
- if (!stub.is_null()) {
- MapHandleList embedded_maps;
- stub->FindAllMaps(&embedded_maps);
- for (int i = 0; i < embedded_maps.length(); i++) {
- if (embedded_maps.at(i).is_identical_to(transition)) {
- return stub;
- }
- }
- }
+ if (!stub.is_null()) return stub;
StoreStubCompiler compiler(isolate_, strict_mode);
Handle<Code> code =
@@ -589,15 +583,7 @@
StrictModeFlag strict_mode) {
Handle<Code> stub = FindIC(
name, receiver, Code::KEYED_STORE_IC, Code::MAP_TRANSITION, strict_mode);
- if (!stub.is_null()) {
- MapHandleList embedded_maps;
- stub->FindAllMaps(&embedded_maps);
- for (int i = 0; i < embedded_maps.length(); i++) {
- if (embedded_maps.at(i).is_identical_to(transition)) {
- return stub;
- }
- }
- }
+ if (!stub.is_null()) return stub;
KeyedStoreStubCompiler compiler(isolate(), strict_mode, STANDARD_STORE);
Handle<Code> code =
@@ -1059,45 +1045,40 @@
void StubCache::CollectMatchingMaps(SmallMapList* types,
- Name* name,
+ Handle<Name> name,
Code::Flags flags,
Handle<Context> native_context,
Zone* zone) {
for (int i = 0; i < kPrimaryTableSize; i++) {
- if (primary_[i].key == name) {
+ if (primary_[i].key == *name) {
Map* map = primary_[i].map;
// Map can be NULL, if the stub is constant function call
// with a primitive receiver.
if (map == NULL) continue;
- int offset = PrimaryOffset(name, flags, map);
+ int offset = PrimaryOffset(*name, flags, map);
if (entry(primary_, offset) == &primary_[i] &&
!TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) {
- types->Add(Handle<Map>(map), zone);
+ types->AddMapIfMissing(Handle<Map>(map), zone);
}
}
}
for (int i = 0; i < kSecondaryTableSize; i++) {
- if (secondary_[i].key == name) {
+ if (secondary_[i].key == *name) {
Map* map = secondary_[i].map;
// Map can be NULL, if the stub is constant function call
// with a primitive receiver.
if (map == NULL) continue;
// Lookup in primary table and skip duplicates.
- int primary_offset = PrimaryOffset(name, flags, map);
- Entry* primary_entry = entry(primary_, primary_offset);
- if (primary_entry->key == name) {
- Map* primary_map = primary_entry->map;
- if (map == primary_map) continue;
- }
+ int primary_offset = PrimaryOffset(*name, flags, map);
// Lookup in secondary table and add matches.
- int offset = SecondaryOffset(name, flags, primary_offset);
+ int offset = SecondaryOffset(*name, flags, primary_offset);
if (entry(secondary_, offset) == &secondary_[i] &&
!TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) {
- types->Add(Handle<Map>(map), zone);
+ types->AddMapIfMissing(Handle<Map>(map), zone);
}
}
}
@@ -1517,16 +1498,17 @@
}
-Handle<Code> BaseLoadStubCompiler::CompileLoadField(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- PropertyIndex field) {
+Handle<Code> BaseLoadStubCompiler::CompileLoadField(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ PropertyIndex field,
+ Representation representation) {
Label miss;
Register reg = HandlerFrontendHeader(object, receiver(), holder, name, &miss);
- LoadFieldStub stub(reg, field.is_inobject(holder), field.translate(holder));
- GenerateTailCall(masm(), stub.GetCode(isolate()));
+ GenerateLoadField(reg, holder, field, representation);
__ bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
@@ -1536,19 +1518,6 @@
}
-// Load a fast property out of a holder object (src). In-object properties
-// are loaded directly otherwise the property is loaded from the properties
-// fixed array.
-void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- Handle<JSObject> holder,
- PropertyIndex index) {
- DoGenerateFastPropertyLoad(
- masm, dst, src, index.is_inobject(holder), index.translate(holder));
-}
-
-
Handle<Code> BaseLoadStubCompiler::CompileLoadConstant(
Handle<JSObject> object,
Handle<JSObject> holder,
@@ -1611,17 +1580,16 @@
if (lookup->IsField()) {
PropertyIndex field = lookup->GetFieldIndex();
if (interceptor_holder.is_identical_to(holder)) {
- LoadFieldStub stub(interceptor_reg,
- field.is_inobject(holder),
- field.translate(holder));
- GenerateTailCall(masm(), stub.GetCode(isolate()));
+ GenerateLoadField(
+ interceptor_reg, holder, field, lookup->representation());
} else {
// We found FIELD property in prototype chain of interceptor's holder.
// Retrieve a field from field's holder.
Register reg = HandlerFrontend(
interceptor_holder, interceptor_reg, holder, name, &success);
__ bind(&success);
- GenerateLoadField(reg, holder, field);
+ GenerateLoadField(
+ reg, holder, field, lookup->representation());
}
} else {
// We found CALLBACKS property in prototype chain of interceptor's
@@ -1673,7 +1641,7 @@
LookupResult* lookup,
Handle<Map> transition,
Handle<Name> name) {
- Label miss, miss_restore_name;
+ Label miss, miss_restore_name, slow;
GenerateNameCheck(name, this->name(), &miss);
@@ -1683,15 +1651,19 @@
transition,
name,
receiver(), this->name(), value(),
- scratch1(), scratch2(),
+ scratch1(), scratch2(), scratch3(),
&miss,
- &miss_restore_name);
+ &miss_restore_name,
+ &slow);
// Handle store cache miss.
GenerateRestoreName(masm(), &miss_restore_name, name);
__ bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
+ GenerateRestoreName(masm(), &slow, name);
+ TailCallBuiltin(masm(), SlowBuiltin(kind()));
+
// Return the generated code.
return GetICCode(kind(), Code::MAP_TRANSITION, name);
}
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 6a08d95..cbaeace 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -112,7 +112,8 @@
Handle<Code> ComputeLoadField(Handle<Name> name,
Handle<JSObject> object,
Handle<JSObject> holder,
- PropertyIndex field_index);
+ PropertyIndex field_index,
+ Representation representation);
Handle<Code> ComputeLoadCallback(Handle<Name> name,
Handle<JSObject> object,
@@ -147,7 +148,8 @@
Handle<Code> ComputeKeyedLoadField(Handle<Name> name,
Handle<JSObject> object,
Handle<JSObject> holder,
- PropertyIndex field_index);
+ PropertyIndex field_index,
+ Representation representation);
Handle<Code> ComputeKeyedLoadCallback(
Handle<Name> name,
@@ -311,7 +313,7 @@
// Collect all maps that match the name and flags.
void CollectMatchingMaps(SmallMapList* types,
- Name* name,
+ Handle<Name> name,
Code::Flags flags,
Handle<Context> native_context,
Zone* zone);
@@ -506,13 +508,9 @@
static void GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst,
Register src,
- Handle<JSObject> holder,
- PropertyIndex index);
- static void DoGenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- bool inobject,
- int index);
+ bool inobject,
+ int index,
+ Representation representation);
static void GenerateLoadArrayLength(MacroAssembler* masm,
Register receiver,
@@ -542,8 +540,10 @@
Register value_reg,
Register scratch1,
Register scratch2,
+ Register scratch3,
Label* miss_label,
- Label* miss_restore_name);
+ Label* miss_restore_name,
+ Label* slow);
void GenerateStoreField(MacroAssembler* masm,
Handle<JSObject> object,
@@ -565,6 +565,14 @@
}
return Builtins::kLoadIC_Miss;
}
+ static Builtins::Name SlowBuiltin(Code::Kind kind) {
+ switch (kind) {
+ case Code::STORE_IC: return Builtins::kStoreIC_Slow;
+ case Code::KEYED_STORE_IC: return Builtins::kKeyedStoreIC_Slow;
+ default: UNREACHABLE();
+ }
+ return Builtins::kStoreIC_Slow;
+ }
static void TailCallBuiltin(MacroAssembler* masm, Builtins::Name name);
// Generates code that verifies that the property holder has not changed
@@ -643,7 +651,8 @@
Handle<Code> CompileLoadField(Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name,
- PropertyIndex index);
+ PropertyIndex index,
+ Representation representation);
Handle<Code> CompileLoadCallback(Handle<JSObject> object,
Handle<JSObject> holder,
@@ -695,7 +704,8 @@
void GenerateLoadField(Register reg,
Handle<JSObject> holder,
- PropertyIndex index);
+ PropertyIndex field,
+ Representation representation);
void GenerateLoadConstant(Handle<JSFunction> value);
void GenerateLoadCallback(Register reg,
Handle<ExecutableAccessorInfo> callback);
diff --git a/src/third_party/vtune/v8-vtune.h b/src/third_party/vtune/v8-vtune.h
index 36c11e6..29ea3ea 100644
--- a/src/third_party/vtune/v8-vtune.h
+++ b/src/third_party/vtune/v8-vtune.h
@@ -60,7 +60,7 @@
namespace vTune {
-void InitilizeVtuneForV8();
+void InitializeVtuneForV8();
} // namespace vTune
diff --git a/src/third_party/vtune/vtune-jit.cc b/src/third_party/vtune/vtune-jit.cc
index 6ff595f..d3f7a68 100644
--- a/src/third_party/vtune/vtune-jit.cc
+++ b/src/third_party/vtune/vtune-jit.cc
@@ -194,8 +194,8 @@
if ((*script->GetScriptName())->IsString()) {
Handle<String> script_name =
Handle<String>(String::Cast(*script->GetScriptName()));
- temp_file_name = new char[script_name->Length() + 1];
- script_name->WriteAscii(temp_file_name);
+ temp_file_name = new char[script_name->Utf8Length() + 1];
+ script_name->WriteUtf8(temp_file_name);
jmethod.source_file_name = temp_file_name;
}
@@ -267,7 +267,7 @@
} // namespace internal
-void InitilizeVtuneForV8() {
+void InitializeVtuneForV8() {
if (v8::V8::Initialize()) {
v8::V8::SetFlagsFromString("--nocompact_code_space",
(int)strlen("--nocompact_code_space"));
diff --git a/src/type-info.cc b/src/type-info.cc
index 3bc509a..1757bee 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -67,7 +67,7 @@
isolate_(isolate),
zone_(zone) {
BuildDictionary(code);
- ASSERT(reinterpret_cast<Address>(*dictionary_.location()) != kHandleZapValue);
+ ASSERT(dictionary_->IsDictionary());
}
@@ -539,15 +539,6 @@
}
-static void AddMapIfMissing(Handle<Map> map, SmallMapList* list,
- Zone* zone) {
- for (int i = 0; i < list->length(); ++i) {
- if (list->at(i).is_identical_to(map)) return;
- }
- list->Add(map, zone);
-}
-
-
void TypeFeedbackOracle::CollectPolymorphicMaps(Handle<Code> code,
SmallMapList* types) {
MapHandleList maps;
@@ -556,7 +547,7 @@
for (int i = 0; i < maps.length(); i++) {
Handle<Map> map(maps.at(i));
if (!CanRetainOtherContext(*map, *native_context_)) {
- AddMapIfMissing(map, types, zone());
+ types->AddMapIfMissing(map, zone());
}
}
}
@@ -574,7 +565,7 @@
// we need a generic store (or load) here.
ASSERT(Handle<Code>::cast(object)->ic_state() == GENERIC);
} else if (object->IsMap()) {
- types->Add(Handle<Map>::cast(object), zone());
+ types->AddMapIfMissing(Handle<Map>::cast(object), zone());
} else if (Handle<Code>::cast(object)->ic_state() == POLYMORPHIC) {
CollectPolymorphicMaps(Handle<Code>::cast(object), types);
} else if (FLAG_collect_megamorphic_maps_from_stub_cache &&
@@ -582,7 +573,7 @@
types->Reserve(4, zone());
ASSERT(object->IsCode());
isolate_->stub_cache()->CollectMatchingMaps(types,
- *name,
+ name,
flags,
native_context_,
zone());
diff --git a/src/typedarray.js b/src/typedarray.js
index 4e50f7f..4fade00 100644
--- a/src/typedarray.js
+++ b/src/typedarray.js
@@ -31,70 +31,20 @@
// in runtime.js:
// var $Array = global.Array;
-var $ArrayBuffer = global.ArrayBuffer;
-// -------------------------------------------------------------------
-
-function ArrayBufferConstructor(byteLength) { // length = 1
- if (%_IsConstructCall()) {
- var l = TO_POSITIVE_INTEGER(byteLength);
- %ArrayBufferInitialize(this, l);
- } else {
- return new $ArrayBuffer(byteLength);
- }
-}
-
-function ArrayBufferGetByteLength() {
- if (!IS_ARRAYBUFFER(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['ArrayBuffer.prototype.byteLength', this]);
- }
- return %ArrayBufferGetByteLength(this);
-}
-
-// ES6 Draft 15.13.5.5.3
-function ArrayBufferSlice(start, end) {
- if (!IS_ARRAYBUFFER(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['ArrayBuffer.prototype.slice', this]);
- }
-
- var relativeStart = TO_INTEGER(start);
- var first;
- if (relativeStart < 0) {
- first = MathMax(this.byteLength + relativeStart, 0);
- } else {
- first = MathMin(relativeStart, this.byteLength);
- }
- var relativeEnd = IS_UNDEFINED(end) ? this.byteLength : TO_INTEGER(end);
- var fin;
- if (relativeEnd < 0) {
- fin = MathMax(this.byteLength + relativeEnd, 0);
- } else {
- fin = MathMin(relativeEnd, this.byteLength);
- }
-
- var newLen = fin - first;
- // TODO(dslomov): implement inheritance
- var result = new $ArrayBuffer(newLen);
-
- %ArrayBufferSliceImpl(this, result, first);
- return result;
-}
// --------------- Typed Arrays ---------------------
function CreateTypedArrayConstructor(name, elementSize, arrayId, constructor) {
function ConstructByArrayBuffer(obj, buffer, byteOffset, length) {
- var offset = IS_UNDEFINED(byteOffset)
- ? 0 : offset = TO_POSITIVE_INTEGER(byteOffset);
+ var offset = IS_UNDEFINED(byteOffset) ? 0 : TO_POSITIVE_INTEGER(byteOffset);
if (offset % elementSize !== 0) {
throw MakeRangeError("invalid_typed_array_alignment",
"start offset", name, elementSize);
}
var bufferByteLength = %ArrayBufferGetByteLength(buffer);
- if (offset >= bufferByteLength) {
+ if (offset > bufferByteLength) {
throw MakeRangeError("invalid_typed_array_offset");
}
@@ -111,7 +61,7 @@
var newLength = TO_POSITIVE_INTEGER(length);
newByteLength = newLength * elementSize;
}
- if (newByteLength > bufferByteLength) {
+ if (offset + newByteLength > bufferByteLength) {
throw MakeRangeError("invalid_typed_array_length");
}
%TypedArrayInitialize(obj, arrayId, buffer, offset, newByteLength);
@@ -120,16 +70,31 @@
function ConstructByLength(obj, length) {
var l = IS_UNDEFINED(length) ? 0 : TO_POSITIVE_INTEGER(length);
var byteLength = l * elementSize;
+ var buffer = new global.ArrayBuffer(byteLength);
+ %TypedArrayInitialize(obj, arrayId, buffer, 0, byteLength);
+ }
+
+ function ConstructByArrayLike(obj, arrayLike) {
+ var length = arrayLike.length;
+ var l = IS_UNDEFINED(length) ? 0 : TO_POSITIVE_INTEGER(length);
+ var byteLength = l * elementSize;
var buffer = new $ArrayBuffer(byteLength);
%TypedArrayInitialize(obj, arrayId, buffer, 0, byteLength);
+ for (var i = 0; i < l; i++) {
+ obj[i] = arrayLike[i];
+ }
}
return function (arg1, arg2, arg3) {
if (%_IsConstructCall()) {
if (IS_ARRAYBUFFER(arg1)) {
ConstructByArrayBuffer(this, arg1, arg2, arg3);
- } else {
+ } else if (IS_NUMBER(arg1) || IS_STRING(arg1) || IS_BOOLEAN(arg1)) {
ConstructByLength(this, arg1);
+ } else if (!IS_UNDEFINED(arg1)){
+ ConstructByArrayLike(this, arg1);
+ } else {
+ throw MakeTypeError("parameterless_typed_array_constr", name);
}
} else {
return new constructor(arg1, arg2, arg3);
@@ -153,27 +118,51 @@
return %TypedArrayGetLength(this);
}
+function CreateSubArray(elementSize, constructor) {
+ return function(begin, end) {
+ var srcLength = %TypedArrayGetLength(this);
+ var beginInt = TO_INTEGER(begin);
+ if (beginInt < 0) {
+ beginInt = MathMax(0, srcLength + beginInt);
+ } else {
+ beginInt = MathMin(srcLength, beginInt);
+ }
-// -------------------------------------------------------------------
-
-function SetUpArrayBuffer() {
- %CheckIsBootstrapping();
-
- // Set up the ArrayBuffer constructor function.
- %SetCode($ArrayBuffer, ArrayBufferConstructor);
- %FunctionSetPrototype($ArrayBuffer, new $Object());
-
- // Set up the constructor property on the ArrayBuffer prototype object.
- %SetProperty($ArrayBuffer.prototype, "constructor", $ArrayBuffer, DONT_ENUM);
-
- InstallGetter($ArrayBuffer.prototype, "byteLength", ArrayBufferGetByteLength);
-
- InstallFunctions($ArrayBuffer.prototype, DONT_ENUM, $Array(
- "slice", ArrayBufferSlice
- ));
+ var endInt = IS_UNDEFINED(end) ? srcLength : TO_INTEGER(end);
+ if (endInt < 0) {
+ endInt = MathMax(0, srcLength + endInt);
+ } else {
+ endInt = MathMin(endInt, srcLength);
+ }
+ if (endInt < beginInt) {
+ endInt = beginInt;
+ }
+ var newLength = endInt - beginInt;
+ var beginByteOffset =
+ %TypedArrayGetByteOffset(this) + beginInt * elementSize;
+ return new constructor(%TypedArrayGetBuffer(this),
+ beginByteOffset, newLength);
+ }
}
-SetUpArrayBuffer();
+function TypedArraySet(obj, offset) {
+ var intOffset = IS_UNDEFINED(offset) ? 0 : TO_POSITIVE_INTEGER(offset);
+ if (%TypedArraySetFastCases(this, obj, intOffset))
+ return;
+
+ var l = obj.length;
+ if (IS_UNDEFINED(l)) {
+ throw MakeTypeError("invalid_argument");
+ }
+ if (intOffset + l > this.length) {
+ throw MakeRangeError("typed_array_set_source_too_large");
+ }
+ for (var i = 0; i < l; i++) {
+ this[intOffset + i] = obj[i];
+ }
+}
+
+// -------------------------------------------------------------------
function SetupTypedArray(arrayId, name, constructor, elementSize) {
%CheckIsBootstrapping();
@@ -191,6 +180,11 @@
InstallGetter(constructor.prototype, "byteOffset", TypedArrayGetByteOffset);
InstallGetter(constructor.prototype, "byteLength", TypedArrayGetByteLength);
InstallGetter(constructor.prototype, "length", TypedArrayGetLength);
+
+ InstallFunctions(constructor.prototype, DONT_ENUM, $Array(
+ "subarray", CreateSubArray(elementSize, constructor),
+ "set", TypedArraySet
+ ));
}
// arrayIds below should be synchronized with Runtime_TypedArrayInitialize.
@@ -202,4 +196,4 @@
SetupTypedArray(6, "Int32Array", global.Int32Array, 4);
SetupTypedArray(7, "Float32Array", global.Float32Array, 4);
SetupTypedArray(8, "Float64Array", global.Float64Array, 8);
-
+SetupTypedArray(9, "Uint8ClampedArray", global.Uint8ClampedArray, 1);
diff --git a/src/v8.h b/src/v8.h
index b041fc2..5ead877 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -48,6 +48,9 @@
#error both DEBUG and NDEBUG are set
#endif
+// TODO(dcarney): remove this
+#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
+
// Basic includes
#include "../include/v8.h"
#include "v8globals.h"
diff --git a/src/v8memory.h b/src/v8memory.h
index f71de82..c72ce7a 100644
--- a/src/v8memory.h
+++ b/src/v8memory.h
@@ -64,6 +64,14 @@
return *reinterpret_cast<unsigned*>(addr);
}
+ static intptr_t& intptr_at(Address addr) {
+ return *reinterpret_cast<intptr_t*>(addr);
+ }
+
+ static uintptr_t& uintptr_at(Address addr) {
+ return *reinterpret_cast<uintptr_t*>(addr);
+ }
+
static double& double_at(Address addr) {
return *reinterpret_cast<double*>(addr);
}
diff --git a/src/v8natives.js b/src/v8natives.js
index 425863e..9266af6 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -1665,7 +1665,6 @@
func = %GetCallTrap(func);
}
- // TODO(wingo): Print source using function* for generators.
if (!IS_FUNCTION(func)) {
throw new $TypeError('Function.prototype.toString is not generic');
}
@@ -1684,7 +1683,8 @@
var name = %FunctionNameShouldPrintAsAnonymous(func)
? 'anonymous'
: %FunctionGetName(func);
- return 'function ' + name + source;
+ var head = %FunctionIsGenerator(func) ? 'function* ' : 'function ';
+ return head + name + source;
}
diff --git a/src/version.cc b/src/version.cc
index 4b5347c..baafcf7 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -33,9 +33,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
-#define MINOR_VERSION 18
-#define BUILD_NUMBER 5
-#define PATCH_LEVEL 2
+#define MINOR_VERSION 19
+#define BUILD_NUMBER 0
+#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index aba2a38..f7ded18 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -75,6 +75,28 @@
}
+void LoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -103,9 +125,10 @@
int constant_stack_parameter_count) {
// register state
// rax -- number of arguments
+ // rdi -- function
// rbx -- type info cell with elements kind
- static Register registers[] = { rbx };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { rdi, rbx };
+ descriptor->register_param_count_ = 2;
if (constant_stack_parameter_count != 0) {
// stack param count needs (constructor pointer, and single argument)
descriptor->stack_parameter_count_ = &rax;
@@ -2392,12 +2415,6 @@
}
-void LoadFieldStub::Generate(MacroAssembler* masm) {
- StubCompiler::DoGenerateFastPropertyLoad(masm, rax, reg_, inobject_, index_);
- __ ret(0);
-}
-
-
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The key is in rdx and the parameter count is in rax.
@@ -3819,6 +3836,7 @@
Handle<Object> terminal_kind_sentinel =
TypeFeedbackCells::MonomorphicArraySentinel(isolate,
LAST_FAST_ELEMENTS_KIND);
+ __ JumpIfNotSmi(rcx, &miss);
__ Cmp(rcx, terminal_kind_sentinel);
__ j(above, &miss);
// Make sure the function is the Array() function
@@ -4908,8 +4926,34 @@
__ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
__ movq(FieldOperand(rcx, ConsString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
+
+ Label skip_write_barrier, after_writing;
+ ExternalReference high_promotion_mode = ExternalReference::
+ new_space_high_promotion_mode_active_address(masm->isolate());
+ __ Load(rbx, high_promotion_mode);
+ __ testb(rbx, Immediate(1));
+ __ j(zero, &skip_write_barrier);
+
+ __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
+ __ RecordWriteField(rcx,
+ ConsString::kFirstOffset,
+ rax,
+ rbx,
+ kDontSaveFPRegs);
+ __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
+ __ RecordWriteField(rcx,
+ ConsString::kSecondOffset,
+ rdx,
+ rbx,
+ kDontSaveFPRegs);
+ __ jmp(&after_writing);
+
+ __ bind(&skip_write_barrier);
__ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
__ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
+
+ __ bind(&after_writing);
+
__ movq(rax, rcx);
__ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
@@ -6348,8 +6392,11 @@
{ REG(r11), REG(rax), REG(r15), EMIT_REMEMBERED_SET},
// StoreArrayLiteralElementStub::Generate
{ REG(rbx), REG(rax), REG(rcx), EMIT_REMEMBERED_SET},
- // FastNewClosureStub::Generate
+ // FastNewClosureStub::Generate and
+ // StringAddStub::Generate
{ REG(rcx), REG(rdx), REG(rbx), EMIT_REMEMBERED_SET},
+ // StringAddStub::Generate
+ { REG(rcx), REG(rax), REG(rbx), EMIT_REMEMBERED_SET},
// Null termination.
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
@@ -6902,14 +6949,8 @@
// Get the elements kind and case on that.
__ Cmp(rbx, undefined_sentinel);
__ j(equal, &no_info);
- __ movq(rdx, FieldOperand(rbx, kPointerSize));
-
- // There is no info if the call site went megamorphic either
-
- // TODO(mvstanton): Really? I thought if it was the array function that
- // the cell wouldn't get stamped as megamorphic.
- __ Cmp(rdx, TypeFeedbackCells::MegamorphicSentinel(masm->isolate()));
- __ j(equal, &no_info);
+ __ movq(rdx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
+ __ JumpIfNotSmi(rdx, &no_info);
__ SmiToInteger32(rdx, rdx);
__ jmp(&switch_ready);
__ bind(&no_info);
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index a20d468..19fa0aa 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1554,7 +1554,8 @@
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
int properties_count = constant_properties->length() / 2;
- if (expr->depth() > 1) {
+ if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
+ expr->depth() > 1) {
__ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
@@ -1924,11 +1925,12 @@
Label resume;
__ CompareRoot(result_register(), Heap::kTheHoleValueRootIndex);
__ j(not_equal, &resume);
- __ pop(result_register());
if (expr->yield_kind() == Yield::SUSPEND) {
- // TODO(wingo): Box into { value: VALUE, done: false }.
+ EmitReturnIteratorResult(false);
+ } else {
+ __ pop(result_register());
+ EmitReturnSequence();
}
- EmitReturnSequence();
__ bind(&resume);
context()->Plug(result_register());
@@ -1940,18 +1942,7 @@
__ Move(FieldOperand(result_register(),
JSGeneratorObject::kContinuationOffset),
Smi::FromInt(JSGeneratorObject::kGeneratorClosed));
- __ pop(result_register());
- // TODO(wingo): Box into { value: VALUE, done: true }.
-
- // Exit all nested statements.
- NestedStatement* current = nesting_stack_;
- int stack_depth = 0;
- int context_length = 0;
- while (current != NULL) {
- current = current->Exit(&stack_depth, &context_length);
- }
- __ Drop(stack_depth);
- EmitReturnSequence();
+ EmitReturnIteratorResult(true);
break;
}
@@ -2058,6 +2049,56 @@
}
+void FullCodeGenerator::EmitReturnIteratorResult(bool done) {
+ Label gc_required;
+ Label allocated;
+
+ Handle<Map> map(isolate()->native_context()->generator_result_map());
+
+ __ Allocate(map->instance_size(), rax, rcx, rdx, &gc_required, TAG_OBJECT);
+
+ __ bind(&allocated);
+ __ Move(rbx, map);
+ __ pop(rcx);
+ __ Move(rdx, isolate()->factory()->ToBoolean(done));
+ ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
+ __ movq(FieldOperand(rax, HeapObject::kMapOffset), rbx);
+ __ Move(FieldOperand(rax, JSObject::kPropertiesOffset),
+ isolate()->factory()->empty_fixed_array());
+ __ Move(FieldOperand(rax, JSObject::kElementsOffset),
+ isolate()->factory()->empty_fixed_array());
+ __ movq(FieldOperand(rax, JSGeneratorObject::kResultValuePropertyOffset),
+ rcx);
+ __ movq(FieldOperand(rax, JSGeneratorObject::kResultDonePropertyOffset),
+ rdx);
+
+ // Only the value field needs a write barrier, as the other values are in the
+ // root set.
+ __ RecordWriteField(rax, JSGeneratorObject::kResultValuePropertyOffset,
+ rcx, rdx, kDontSaveFPRegs);
+
+ if (done) {
+ // Exit all nested statements.
+ NestedStatement* current = nesting_stack_;
+ int stack_depth = 0;
+ int context_length = 0;
+ while (current != NULL) {
+ current = current->Exit(&stack_depth, &context_length);
+ }
+ __ Drop(stack_depth);
+ }
+
+ EmitReturnSequence();
+
+ __ bind(&gc_required);
+ __ Push(Smi::FromInt(map->instance_size()));
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ movq(context_register(),
+ Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ jmp(&allocated);
+}
+
+
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 0a9ceaa..761e05a 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -1528,6 +1528,26 @@
}
+void StoreIC::GenerateSlow(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+
+ __ pop(rbx);
+ __ push(rdx); // receiver
+ __ push(rcx); // key
+ __ push(rax); // value
+ __ push(rbx); // return address
+
+ // Do tail-call to runtime routine.
+ ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 61eb7d1..f908ea1 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -1674,19 +1674,27 @@
void LCodeGen::DoAddI(LAddI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
- ASSERT(left->Equals(instr->result()));
- if (right->IsConstantOperand()) {
- __ addl(ToRegister(left),
- Immediate(ToInteger32(LConstantOperand::cast(right))));
- } else if (right->IsRegister()) {
- __ addl(ToRegister(left), ToRegister(right));
+ if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
+ if (right->IsConstantOperand()) {
+ int32_t offset = ToInteger32(LConstantOperand::cast(right));
+ __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
+ } else {
+ Operand address(ToRegister(left), ToRegister(right), times_1, 0);
+ __ lea(ToRegister(instr->result()), address);
+ }
} else {
- __ addl(ToRegister(left), ToOperand(right));
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
+ if (right->IsConstantOperand()) {
+ __ addl(ToRegister(left),
+ Immediate(ToInteger32(LConstantOperand::cast(right))));
+ } else if (right->IsRegister()) {
+ __ addl(ToRegister(left), ToRegister(right));
+ } else {
+ __ addl(ToRegister(left), ToOperand(right));
+ }
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
}
}
@@ -2668,29 +2676,21 @@
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+ int offset = instr->hydrogen()->offset();
Register object = ToRegister(instr->object());
- if (!FLAG_track_double_fields) {
- ASSERT(!instr->hydrogen()->representation().IsDouble());
- }
- Register temp = instr->hydrogen()->representation().IsDouble()
- ? ToRegister(instr->temp()) : ToRegister(instr->result());
- if (instr->hydrogen()->is_in_object()) {
- __ movq(temp, FieldOperand(object, instr->hydrogen()->offset()));
- } else {
- __ movq(temp, FieldOperand(object, JSObject::kPropertiesOffset));
- __ movq(temp, FieldOperand(temp, instr->hydrogen()->offset()));
+ if (FLAG_track_double_fields &&
+ instr->hydrogen()->representation().IsDouble()) {
+ XMMRegister result = ToDoubleRegister(instr->result());
+ __ movsd(result, FieldOperand(object, offset));
+ return;
}
- if (instr->hydrogen()->representation().IsDouble()) {
- Label load_from_heap_number, done;
- XMMRegister result = ToDoubleRegister(instr->result());
- __ JumpIfNotSmi(temp, &load_from_heap_number);
- __ SmiToInteger32(temp, temp);
- __ cvtlsi2sd(result, temp);
- __ jmp(&done);
- __ bind(&load_from_heap_number);
- __ movsd(result, FieldOperand(temp, HeapNumber::kValueOffset));
- __ bind(&done);
+ Register result = ToRegister(instr->result());
+ if (instr->hydrogen()->is_in_object()) {
+ __ movq(result, FieldOperand(object, offset));
+ } else {
+ __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
+ __ movq(result, FieldOperand(result, offset));
}
}
@@ -2854,41 +2854,6 @@
}
-void LCodeGen::DoLoadElements(LLoadElements* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->object());
- __ movq(result, FieldOperand(input, JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- Label done, ok, fail;
- __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(equal, &done, Label::kNear);
- __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
- Heap::kFixedCOWArrayMapRootIndex);
- __ j(equal, &done, Label::kNear);
- Register temp((result.is(rax)) ? rbx : rax);
- __ push(temp);
- __ movq(temp, FieldOperand(result, HeapObject::kMapOffset));
- __ movzxbq(temp, FieldOperand(temp, Map::kBitField2Offset));
- __ and_(temp, Immediate(Map::kElementsKindMask));
- __ shr(temp, Immediate(Map::kElementsKindShift));
- __ cmpl(temp, Immediate(GetInitialFastElementsKind()));
- __ j(less, &fail, Label::kNear);
- __ cmpl(temp, Immediate(TERMINAL_FAST_ELEMENTS_KIND));
- __ j(less_equal, &ok, Label::kNear);
- __ cmpl(temp, Immediate(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ j(less, &fail, Label::kNear);
- __ cmpl(temp, Immediate(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ j(less_equal, &ok, Label::kNear);
- __ bind(&fail);
- __ Abort("Check for fast or external elements failed");
- __ bind(&ok);
- __ pop(temp);
- __ bind(&done);
- }
-}
-
-
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register result = ToRegister(instr->result());
@@ -3911,8 +3876,7 @@
__ Set(rax, instr->arity());
__ Move(rbx, instr->hydrogen()->property_cell());
- Object* cell_value = instr->hydrogen()->property_cell()->value();
- ElementsKind kind = static_cast<ElementsKind>(Smi::cast(cell_value)->value());
+ ElementsKind kind = instr->hydrogen()->elements_kind();
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(kind);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
@@ -3945,6 +3909,8 @@
int offset = instr->offset();
+ Handle<Map> transition = instr->transition();
+
if (FLAG_track_fields && representation.IsSmi()) {
if (instr->value()->IsConstantOperand()) {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
@@ -3955,18 +3921,15 @@
Register value = ToRegister(instr->value());
__ Integer32ToSmi(value, value);
}
- } else if (FLAG_track_double_fields && representation.IsDouble() &&
- !instr->hydrogen()->value()->type().IsSmi() &&
- !instr->hydrogen()->value()->type().IsHeapNumber()) {
- Register value = ToRegister(instr->value());
- Label do_store;
- __ JumpIfSmi(value, &do_store);
- Handle<Map> map(isolate()->factory()->heap_number_map());
- DoCheckMapCommon(value, map, REQUIRE_EXACT_MAP, instr);
- __ bind(&do_store);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ ASSERT(transition.is_null());
+ ASSERT(instr->is_in_object());
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ XMMRegister value = ToDoubleRegister(instr->value());
+ __ movsd(FieldOperand(object, offset), value);
+ return;
}
- Handle<Map> transition = instr->transition();
if (!transition.is_null()) {
if (transition->CanBeDeprecated()) {
transition_maps_.Add(transition, info()->zone());
@@ -4011,6 +3974,7 @@
ToRegister(operand_value));
} else {
Handle<Object> handle_value = ToHandle(operand_value);
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
__ Move(FieldOperand(write_register, offset), handle_value);
}
} else {
@@ -4803,6 +4767,8 @@
} else {
mode = NUMBER_CANDIDATE_IS_SMI;
}
+ } else {
+ mode = NUMBER_CANDIDATE_IS_SMI;
}
}
@@ -5261,7 +5227,8 @@
// Set up the parameters to the stub/runtime call and pick the right
// runtime function or stub to call.
int properties_count = instr->hydrogen()->constant_properties_length() / 2;
- if (instr->hydrogen()->depth() > 1) {
+ if ((FLAG_track_double_fields && instr->hydrogen()->may_store_doubles()) ||
+ instr->hydrogen()->depth() > 1) {
__ PushHeapObject(literals);
__ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
__ Push(constant_properties);
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 094f5ed..f49f7d6 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -685,7 +685,7 @@
int vreg = allocator_->GetVirtualRegister();
if (!allocator_->AllocationOk()) {
Abort("Out of virtual registers while trying to allocate temp register.");
- return NULL;
+ vreg = 0;
}
operand->set_virtual_register(vreg);
return operand;
@@ -782,8 +782,8 @@
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
return DefineSameAsFirst(result);
}
@@ -1309,8 +1309,8 @@
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineSameAsFirst(new(zone()) LBitI(left, right));
} else {
ASSERT(instr->representation().IsTagged());
@@ -1473,8 +1473,8 @@
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstant(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseOrConstant(instr->BetterRightOperand());
LMulI* mul = new(zone()) LMulI(left, right);
if (instr->CheckFlag(HValue::kCanOverflow) ||
instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -1513,13 +1513,24 @@
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
if (instr->representation().IsInteger32()) {
+ // Check to see if it would be advantageous to use an lea instruction rather
+ // than an add. This is the case when no overflow check is needed and there
+ // are multiple uses of the add's inputs, so using a 3-register add will
+ // preserve all input values for later uses.
+ bool use_lea = LAddI::UseLea(instr);
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ HValue* right_candidate = instr->BetterRightOperand();
+ LOperand* right = use_lea
+ ? UseRegisterOrConstantAtStart(right_candidate)
+ : UseOrConstantAtStart(right_candidate);
LAddI* add = new(zone()) LAddI(left, right);
- LInstruction* result = DefineSameAsFirst(add);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
+ bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+ LInstruction* result = use_lea
+ ? DefineAsRegister(add)
+ : DefineSameAsFirst(add);
+ if (can_overflow) {
result = AssignEnvironment(result);
}
return result;
@@ -1539,8 +1550,8 @@
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- left = UseRegisterAtStart(instr->LeastConstantOperand());
- right = UseOrConstantAtStart(instr->MostConstantOperand());
+ left = UseRegisterAtStart(instr->BetterLeftOperand());
+ right = UseOrConstantAtStart(instr->BetterRightOperand());
} else {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
@@ -2028,9 +2039,7 @@
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
LOperand* obj = UseRegisterAtStart(instr->object());
- LOperand* temp = instr->representation().IsDouble() ? TempRegister() : NULL;
- ASSERT(temp == NULL || FLAG_track_double_fields);
- return DefineAsRegister(new(zone()) LLoadNamedField(obj, temp));
+ return DefineAsRegister(new(zone()) LLoadNamedField(obj));
}
@@ -2065,12 +2074,6 @@
}
-LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadElements(input));
-}
-
-
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
@@ -2256,6 +2259,9 @@
val = UseRegisterOrConstant(instr->value());
} else if (FLAG_track_fields && instr->field_representation().IsSmi()) {
val = UseTempRegister(instr->value());
+ } else if (FLAG_track_double_fields &&
+ instr->field_representation().IsDouble()) {
+ val = UseRegisterAtStart(instr->value());
} else {
val = UseRegister(instr->value());
}
@@ -2266,8 +2272,7 @@
needs_write_barrier_for_map) ? TempRegister() : NULL;
LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
- if ((FLAG_track_fields && instr->field_representation().IsSmi()) ||
- (FLAG_track_double_fields && instr->field_representation().IsDouble())) {
+ if (FLAG_track_fields && instr->field_representation().IsSmi()) {
return AssignEnvironment(result);
}
return result;
@@ -2384,7 +2389,7 @@
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedIndex) {
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
Abort("Too many spill slots needed for OSR");
spill_index = 0;
}
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index f288391..d1f7e76 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -123,7 +123,6 @@
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
- V(LoadElements) \
V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
@@ -1346,6 +1345,11 @@
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
+ static bool UseLea(HAdd* add) {
+ return !add->CheckFlag(HValue::kCanOverflow) &&
+ add->BetterLeftOperand()->UseCount() > 1;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
DECLARE_HYDROGEN_ACCESSOR(Add)
};
@@ -1459,15 +1463,13 @@
};
-class LLoadNamedField: public LTemplateInstruction<1, 1, 1> {
+class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LLoadNamedField(LOperand* object, LOperand* temp) {
+ explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
- temps_[0] = temp;
}
LOperand* object() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
@@ -1514,18 +1516,6 @@
};
-class LLoadElements: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadElements(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
-};
-
-
class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadExternalArrayPointer(LOperand* object) {
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index d9db3ad..691894c 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -4066,9 +4066,33 @@
Register scratch1,
Register scratch2,
Label* gc_required) {
- // Allocate heap number in new space.
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ Label allocate_new_space, install_map;
+ AllocationFlags flags = TAG_OBJECT;
+
+ ExternalReference high_promotion_mode = ExternalReference::
+ new_space_high_promotion_mode_active_address(isolate());
+
+ Load(scratch1, high_promotion_mode);
+ testb(scratch1, Immediate(1));
+ j(zero, &allocate_new_space);
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
+
+ jmp(&install_map);
+
+ bind(&allocate_new_space);
+ Allocate(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ flags);
+
+ bind(&install_map);
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 67750d1..a7faf9b 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -343,11 +343,13 @@
}
-void StubCompiler::DoGenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- bool inobject,
- int index) {
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+ Register dst,
+ Register src,
+ bool inobject,
+ int index,
+ Representation representation) {
+ ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
@@ -745,8 +747,10 @@
Register value_reg,
Register scratch1,
Register scratch2,
+ Register unused,
Label* miss_label,
- Label* miss_restore_name) {
+ Label* miss_restore_name,
+ Label* slow) {
// Check that the map of the object hasn't changed.
__ CheckMap(receiver_reg, Handle<Map>(object->map()),
miss_label, DO_SMI_CHECK, REQUIRE_EXACT_MAP);
@@ -765,16 +769,6 @@
// Ensure no transitions to deprecated maps are followed.
__ CheckMapDeprecated(transition, scratch1, miss_label);
- if (FLAG_track_fields && representation.IsSmi()) {
- __ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
- Label do_store;
- __ JumpIfSmi(value_reg, &do_store);
- __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
- miss_label, DONT_DO_SMI_CHECK, REQUIRE_EXACT_MAP);
- __ bind(&do_store);
- }
-
// Check that we are allowed to write this.
if (object->GetPrototype()->IsJSObject()) {
JSObject* holder;
@@ -790,7 +784,7 @@
}
Register holder_reg = CheckPrototypes(
object, receiver_reg, Handle<JSObject>(holder), name_reg,
- scratch1, scratch2, name, miss_restore_name);
+ scratch1, scratch2, name, miss_restore_name, SKIP_RECEIVER);
// If no property was found, and the holder (the last object in the
// prototype chain) is in slow mode, we need to do a negative lookup on the
// holder.
@@ -809,6 +803,28 @@
}
}
+ Register storage_reg = name_reg;
+
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_restore_name);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ Label do_store, heap_number;
+ __ AllocateHeapNumber(storage_reg, scratch1, slow);
+
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiToInteger32(scratch1, value_reg);
+ __ cvtlsi2sd(xmm0, scratch1);
+ __ jmp(&do_store);
+
+ __ bind(&heap_number);
+ __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
+ miss_restore_name, DONT_DO_SMI_CHECK, REQUIRE_EXACT_MAP);
+ __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+
+ __ bind(&do_store);
+ __ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
+ }
+
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
@@ -834,12 +850,11 @@
__ Move(scratch1, transition);
__ movq(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
- // Update the write barrier for the map field and pass the now unused
- // name_reg as scratch register.
+ // Update the write barrier for the map field.
__ RecordWriteField(receiver_reg,
HeapObject::kMapOffset,
scratch1,
- name_reg,
+ scratch2,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
@@ -856,12 +871,20 @@
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- __ movq(FieldOperand(receiver_reg, offset), value_reg);
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ __ movq(FieldOperand(receiver_reg, offset), storage_reg);
+ } else {
+ __ movq(FieldOperand(receiver_reg, offset), value_reg);
+ }
if (!FLAG_track_fields || !representation.IsSmi()) {
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, value_reg);
+ if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ __ movq(name_reg, value_reg);
+ } else {
+ ASSERT(storage_reg.is(name_reg));
+ }
__ RecordWriteField(
receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs);
}
@@ -870,12 +893,20 @@
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
__ movq(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ movq(FieldOperand(scratch1, offset), value_reg);
+ if (FLAG_track_double_fields && representation.IsDouble()) {
+ __ movq(FieldOperand(scratch1, offset), storage_reg);
+ } else {
+ __ movq(FieldOperand(scratch1, offset), value_reg);
+ }
if (!FLAG_track_fields || !representation.IsSmi()) {
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, value_reg);
+ if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ __ movq(name_reg, value_reg);
+ } else {
+ ASSERT(storage_reg.is(name_reg));
+ }
__ RecordWriteField(
scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs);
}
@@ -923,11 +954,35 @@
if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
- Label do_store;
- __ JumpIfSmi(value_reg, &do_store);
+ // Load the double storage.
+ if (index < 0) {
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ movq(scratch1, FieldOperand(receiver_reg, offset));
+ } else {
+ __ movq(scratch1,
+ FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ __ movq(scratch1, FieldOperand(scratch1, offset));
+ }
+
+ // Store the value into the storage.
+ Label do_store, heap_number;
+ __ JumpIfNotSmi(value_reg, &heap_number);
+ __ SmiToInteger32(scratch2, value_reg);
+ __ cvtlsi2sd(xmm0, scratch2);
+ __ jmp(&do_store);
+
+ __ bind(&heap_number);
__ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
miss_label, DONT_DO_SMI_CHECK, REQUIRE_EXACT_MAP);
+ __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+
__ bind(&do_store);
+ __ movsd(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0);
+ // Return the value (register rax).
+ ASSERT(value_reg.is(rax));
+ __ ret(0);
+ return;
}
// TODO(verwaest): Share this code as a code stub.
@@ -1196,10 +1251,20 @@
void BaseLoadStubCompiler::GenerateLoadField(Register reg,
Handle<JSObject> holder,
- PropertyIndex index) {
- // Get the value from the properties.
- GenerateFastPropertyLoad(masm(), rax, reg, holder, index);
- __ ret(0);
+ PropertyIndex field,
+ Representation representation) {
+ if (!reg.is(receiver())) __ movq(receiver(), reg);
+ if (kind() == Code::LOAD_IC) {
+ LoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ } else {
+ KeyedLoadFieldStub stub(field.is_inobject(holder),
+ field.translate(holder),
+ representation);
+ GenerateTailCall(masm(), stub.GetCode(isolate()));
+ }
}
@@ -1454,7 +1519,8 @@
Register reg = CheckPrototypes(object, rdx, holder, rbx, rax, rdi,
name, &miss);
- GenerateFastPropertyLoad(masm(), rdi, reg, holder, index);
+ GenerateFastPropertyLoad(masm(), rdi, reg, index.is_inobject(holder),
+ index.translate(holder), Representation::Tagged());
// Check that the function really is a function.
__ JumpIfSmi(rdi, &miss);
diff --git a/test/cctest/cctest.cc b/test/cctest/cctest.cc
index a0091ff..1cdaca4 100644
--- a/test/cctest/cctest.cc
+++ b/test/cctest/cctest.cc
@@ -25,6 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+
#include <v8.h>
#include "cctest.h"
#include "debug.h"
diff --git a/test/cctest/cctest.h b/test/cctest/cctest.h
index 854d89e..c249b5e 100644
--- a/test/cctest/cctest.h
+++ b/test/cctest/cctest.h
@@ -83,17 +83,22 @@
const char* name() { return name_; }
const char* dependency() { return dependency_; }
bool enabled() { return enabled_; }
- static void set_default_isolate(v8::Isolate* default_isolate) {
- default_isolate_ = default_isolate;
- }
static v8::Isolate* default_isolate() { return default_isolate_; }
- static v8::Isolate* isolate() { return context_->GetIsolate(); }
- static v8::Handle<v8::Context> env() { return context_; }
+
+ static v8::Handle<v8::Context> env() {
+ return v8::Local<v8::Context>::New(default_isolate_, context_);
+ }
+
+ static v8::Isolate* isolate() { return default_isolate_; }
// Helper function to initialize the VM.
static void InitializeVM(CcTestExtensionFlags extensions = NO_EXTENSIONS);
private:
+ friend int main(int argc, char** argv);
+ static void set_default_isolate(v8::Isolate* default_isolate) {
+ default_isolate_ = default_isolate;
+ }
TestFunction* callback_;
const char* file_;
const char* name_;
@@ -195,15 +200,21 @@
const char* name_;
};
-
+namespace v8 {
// A LocalContext holds a reference to a v8::Context.
class LocalContext {
public:
LocalContext(v8::ExtensionConfiguration* extensions = 0,
v8::Handle<v8::ObjectTemplate> global_template =
v8::Handle<v8::ObjectTemplate>(),
- v8::Handle<v8::Value> global_object = v8::Handle<v8::Value>())
- : context_(v8::Context::New(extensions, global_template, global_object)) {
+ v8::Handle<v8::Value> global_object = v8::Handle<v8::Value>()) {
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ HandleScope scope(isolate);
+ context_.Reset(isolate,
+ Context::New(isolate,
+ extensions,
+ global_template,
+ global_object));
context_->Enter();
// We can't do this later perhaps because of a fatal error.
isolate_ = context_->GetIsolate();
@@ -219,14 +230,15 @@
bool IsReady() { return !context_.IsEmpty(); }
v8::Local<v8::Context> local() {
- return v8::Local<v8::Context>::New(context_);
+ return v8::Local<v8::Context>::New(isolate_, context_);
}
private:
v8::Persistent<v8::Context> context_;
v8::Isolate* isolate_;
};
-
+}
+typedef v8::LocalContext LocalContext;
static inline v8::Local<v8::Value> v8_num(double x) {
return v8::Number::New(x);
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index a2da448..e5523b2 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -39,6 +39,9 @@
# when snapshot is on, so I am marking it PASS || FAIL
test-heap-profiler/HeapSnapshotsDiff: PASS || FAIL
+# BUG(2628): This test is flaky and sometimes fails, but should not crash.
+test-cpu-profiler/CollectCpuProfile: PASS || FAIL
+
# These tests always fail. They are here to test test.py. If
# they don't fail then test.py has failed.
test-serialize/TestThatAlwaysFails: FAIL
@@ -105,4 +108,4 @@
test-sockets/Socket: SKIP
# Profiling doesn't work on Native Client.
-test-cpu-profiler/*
+test-cpu-profiler/*: SKIP
diff --git a/test/cctest/test-alloc.cc b/test/cctest/test-alloc.cc
index d71247c..bc469aa 100644
--- a/test/cctest/test-alloc.cc
+++ b/test/cctest/test-alloc.cc
@@ -95,8 +95,8 @@
TEST(StressHandles) {
- v8::Persistent<v8::Context> env = v8::Context::New();
- v8::HandleScope scope(env->GetIsolate());
+ v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::Handle<v8::Context> env = v8::Context::New(v8::Isolate::GetCurrent());
env->Enter();
Handle<Object> o = Test();
CHECK(o->IsSmi() && Smi::cast(*o)->value() == 42);
@@ -117,8 +117,8 @@
TEST(StressJS) {
- v8::Persistent<v8::Context> env = v8::Context::New();
- v8::HandleScope scope(env->GetIsolate());
+ v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::Handle<v8::Context> env = v8::Context::New(v8::Isolate::GetCurrent());
env->Enter();
Handle<JSFunction> function =
FACTORY->NewFunction(FACTORY->function_string(), FACTORY->null_value());
@@ -142,8 +142,7 @@
CallbacksDescriptor d(*name,
*foreign,
- static_cast<PropertyAttributes>(0),
- v8::internal::PropertyDetails::kInitialIndex);
+ static_cast<PropertyAttributes>(0));
map->AppendDescriptor(&d, witness);
// Add the Foo constructor the global object.
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 255397b..c9685f8 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -32,6 +32,11 @@
#include <unistd.h> // getpid
#endif // WIN32
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+
#include "v8.h"
#include "api.h"
@@ -162,8 +167,8 @@
THREADED_TEST(IsolateOfContext) {
- v8::Persistent<Context> env = Context::New();
- v8::HandleScope scope(env->GetIsolate());
+ v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::Handle<Context> env = Context::New(v8::Isolate::GetCurrent());
CHECK(!env->InContext());
CHECK(env->GetIsolate() == v8::Isolate::GetCurrent());
@@ -173,8 +178,6 @@
env->Exit();
CHECK(!env->InContext());
CHECK(env->GetIsolate() == v8::Isolate::GetCurrent());
-
- env.Dispose(env->GetIsolate());
}
@@ -289,8 +292,8 @@
v8::HandleScope scope(env->GetIsolate());
v8::Handle<v8::Primitive> undef = v8::Undefined();
Local<String> undef_str = undef->ToString();
- char* value = i::NewArray<char>(undef_str->Length() + 1);
- undef_str->WriteAscii(value);
+ char* value = i::NewArray<char>(undef_str->Utf8Length() + 1);
+ undef_str->WriteUtf8(value);
CHECK_EQ(0, strcmp(value, "undefined"));
i::DeleteArray(value);
}
@@ -2254,6 +2257,7 @@
THREADED_TEST(ArrayBuffer) {
+ i::FLAG_harmony_array_buffer = true;
i::FLAG_harmony_typed_arrays = true;
LocalContext env;
@@ -2319,6 +2323,9 @@
}
+
+
+
THREADED_TEST(HiddenProperties) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -2480,6 +2487,58 @@
}
+THREADED_TEST(ResettingGlobalHandle) {
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::internal::GlobalHandles* global_handles = NULL;
+ int initial_handle_count = 0;
+ v8::Persistent<String> global;
+ {
+ v8::HandleScope scope(isolate);
+ Local<String> str = v8_str("str");
+ global_handles =
+ reinterpret_cast<v8::internal::Isolate*>(isolate)->global_handles();
+ initial_handle_count = global_handles->NumberOfGlobalHandles();
+ global = v8::Persistent<String>::New(isolate, str);
+ }
+ CHECK_EQ(global->Length(), 3);
+ CHECK_EQ(global_handles->NumberOfGlobalHandles(), initial_handle_count + 1);
+ {
+ v8::HandleScope scope(isolate);
+ Local<String> str = v8_str("longer");
+ global.Reset(isolate, str);
+ }
+ CHECK_EQ(global->Length(), 6);
+ CHECK_EQ(global_handles->NumberOfGlobalHandles(), initial_handle_count + 1);
+ global.Dispose(isolate);
+ CHECK_EQ(global_handles->NumberOfGlobalHandles(), initial_handle_count);
+}
+
+
+THREADED_TEST(ResettingGlobalHandleToEmpty) {
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::internal::GlobalHandles* global_handles = NULL;
+ int initial_handle_count = 0;
+ v8::Persistent<String> global;
+ {
+ v8::HandleScope scope(isolate);
+ Local<String> str = v8_str("str");
+ global_handles =
+ reinterpret_cast<v8::internal::Isolate*>(isolate)->global_handles();
+ initial_handle_count = global_handles->NumberOfGlobalHandles();
+ global = v8::Persistent<String>::New(isolate, str);
+ }
+ CHECK_EQ(global->Length(), 3);
+ CHECK_EQ(global_handles->NumberOfGlobalHandles(), initial_handle_count + 1);
+ {
+ v8::HandleScope scope(isolate);
+ Local<String> empty;
+ global.Reset(isolate, empty);
+ }
+ CHECK(global.IsEmpty());
+ CHECK_EQ(global_handles->NumberOfGlobalHandles(), initial_handle_count);
+}
+
+
THREADED_TEST(LocalHandle) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
v8::Local<String> local = v8::Local<String>::New(v8_str("str"));
@@ -2503,12 +2562,11 @@
static void WeakPointerCallback(v8::Isolate* isolate,
- Persistent<Value> handle,
- void* id) {
- WeakCallCounter* counter = reinterpret_cast<WeakCallCounter*>(id);
+ Persistent<Object>* handle,
+ WeakCallCounter* counter) {
CHECK_EQ(1234, counter->id());
counter->increment();
- handle.Dispose(isolate);
+ handle->Dispose(isolate);
}
@@ -2531,23 +2589,23 @@
g1s1 = Persistent<Object>::New(iso, Object::New());
g1s2 = Persistent<Object>::New(iso, Object::New());
g1c1 = Persistent<Object>::New(iso, Object::New());
- g1s1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g1s2.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g1c1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g1s1.MakeWeak(iso, &counter, &WeakPointerCallback);
+ g1s2.MakeWeak(iso, &counter, &WeakPointerCallback);
+ g1c1.MakeWeak(iso, &counter, &WeakPointerCallback);
g2s1 = Persistent<Object>::New(iso, Object::New());
g2s2 = Persistent<Object>::New(iso, Object::New());
g2c1 = Persistent<Object>::New(iso, Object::New());
- g2s1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g2s2.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g2c1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g2s1.MakeWeak(iso, &counter, &WeakPointerCallback);
+ g2s2.MakeWeak(iso, &counter, &WeakPointerCallback);
+ g2c1.MakeWeak(iso, &counter, &WeakPointerCallback);
}
Persistent<Object> root = Persistent<Object>::New(iso, g1s1); // make a root.
// Connect group 1 and 2, make a cycle.
- CHECK(g1s2->Set(0, g2s2));
- CHECK(g2s1->Set(0, g1s1));
+ CHECK(g1s2->Set(0, Handle<Object>(*g2s2)));
+ CHECK(g2s1->Set(0, Handle<Object>(*g1s1)));
{
Persistent<Value> g1_objects[] = { g1s1, g1s2 };
@@ -2566,7 +2624,7 @@
CHECK_EQ(0, counter.NumberOfWeakCalls());
// Weaken the root.
- root.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ root.MakeWeak(iso, &counter, &WeakPointerCallback);
// But make children strong roots---all the objects (except for children)
// should be collectable now.
g1c1.ClearWeak(iso);
@@ -2590,8 +2648,8 @@
CHECK_EQ(5, counter.NumberOfWeakCalls());
// And now make children weak again and collect them.
- g1c1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g2c1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g1c1.MakeWeak(iso, &counter, &WeakPointerCallback);
+ g2c1.MakeWeak(iso, &counter, &WeakPointerCallback);
HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
CHECK_EQ(7, counter.NumberOfWeakCalls());
@@ -2617,23 +2675,23 @@
g1s1 = Persistent<Object>::New(iso, Object::New());
g1s2 = Persistent<Object>::New(iso, Object::New());
g1c1 = Persistent<Object>::New(iso, Object::New());
- g1s1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g1s2.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g1c1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g1s1.MakeWeak(iso, &counter, &WeakPointerCallback);
+ g1s2.MakeWeak(iso, &counter, &WeakPointerCallback);
+ g1c1.MakeWeak(iso, &counter, &WeakPointerCallback);
g2s1 = Persistent<Object>::New(iso, Object::New());
g2s2 = Persistent<Object>::New(iso, Object::New());
g2c1 = Persistent<Object>::New(iso, Object::New());
- g2s1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g2s2.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g2c1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g2s1.MakeWeak(iso, &counter, &WeakPointerCallback);
+ g2s2.MakeWeak(iso, &counter, &WeakPointerCallback);
+ g2c1.MakeWeak(iso, &counter, &WeakPointerCallback);
}
Persistent<Object> root = Persistent<Object>::New(iso, g1s1); // make a root.
// Connect group 1 and 2, make a cycle.
- CHECK(g1s2->Set(0, g2s2));
- CHECK(g2s1->Set(0, g1s1));
+ CHECK(g1s2->Set(0, Local<Value>(*g2s2)));
+ CHECK(g2s1->Set(0, Local<Value>(*g1s1)));
{
UniqueId id1(reinterpret_cast<intptr_t>(*g1s1));
@@ -2654,7 +2712,7 @@
CHECK_EQ(0, counter.NumberOfWeakCalls());
// Weaken the root.
- root.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ root.MakeWeak(iso, &counter, &WeakPointerCallback);
// But make children strong roots---all the objects (except for children)
// should be collectable now.
g1c1.ClearWeak(iso);
@@ -2678,8 +2736,8 @@
CHECK_EQ(5, counter.NumberOfWeakCalls());
// And now make children weak again and collect them.
- g1c1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g2c1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g1c1.MakeWeak(iso, &counter, &WeakPointerCallback);
+ g2c1.MakeWeak(iso, &counter, &WeakPointerCallback);
heap->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
CHECK_EQ(7, counter.NumberOfWeakCalls());
@@ -2706,29 +2764,29 @@
HandleScope scope(iso);
g1s1 = Persistent<Object>::New(iso, Object::New());
g1s2 = Persistent<Object>::New(iso, Object::New());
- g1s1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g1s2.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g1s1.MakeWeak(iso, &counter, &WeakPointerCallback);
+ g1s2.MakeWeak(iso, &counter, &WeakPointerCallback);
CHECK(g1s1.IsWeak(iso));
CHECK(g1s2.IsWeak(iso));
g2s1 = Persistent<Object>::New(iso, Object::New());
g2s2 = Persistent<Object>::New(iso, Object::New());
- g2s1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g2s2.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g2s1.MakeWeak(iso, &counter, &WeakPointerCallback);
+ g2s2.MakeWeak(iso, &counter, &WeakPointerCallback);
CHECK(g2s1.IsWeak(iso));
CHECK(g2s2.IsWeak(iso));
g3s1 = Persistent<Object>::New(iso, Object::New());
g3s2 = Persistent<Object>::New(iso, Object::New());
- g3s1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g3s2.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g3s1.MakeWeak(iso, &counter, &WeakPointerCallback);
+ g3s2.MakeWeak(iso, &counter, &WeakPointerCallback);
CHECK(g3s1.IsWeak(iso));
CHECK(g3s2.IsWeak(iso));
g4s1 = Persistent<Object>::New(iso, Object::New());
g4s2 = Persistent<Object>::New(iso, Object::New());
- g4s1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g4s2.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g4s1.MakeWeak(iso, &counter, &WeakPointerCallback);
+ g4s2.MakeWeak(iso, &counter, &WeakPointerCallback);
CHECK(g4s1.IsWeak(iso));
CHECK(g4s2.IsWeak(iso));
}
@@ -2763,7 +2821,7 @@
CHECK_EQ(0, counter.NumberOfWeakCalls());
// Weaken the root.
- root.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ root.MakeWeak(iso, &counter, &WeakPointerCallback);
// Groups are deleted, rebuild groups.
{
@@ -2812,29 +2870,29 @@
HandleScope scope(iso);
g1s1 = Persistent<Object>::New(iso, Object::New());
g1s2 = Persistent<Object>::New(iso, Object::New());
- g1s1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g1s2.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g1s1.MakeWeak(iso, &counter, &WeakPointerCallback);
+ g1s2.MakeWeak(iso, &counter, &WeakPointerCallback);
CHECK(g1s1.IsWeak(iso));
CHECK(g1s2.IsWeak(iso));
g2s1 = Persistent<Object>::New(iso, Object::New());
g2s2 = Persistent<Object>::New(iso, Object::New());
- g2s1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g2s2.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g2s1.MakeWeak(iso, &counter, &WeakPointerCallback);
+ g2s2.MakeWeak(iso, &counter, &WeakPointerCallback);
CHECK(g2s1.IsWeak(iso));
CHECK(g2s2.IsWeak(iso));
g3s1 = Persistent<Object>::New(iso, Object::New());
g3s2 = Persistent<Object>::New(iso, Object::New());
- g3s1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g3s2.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g3s1.MakeWeak(iso, &counter, &WeakPointerCallback);
+ g3s2.MakeWeak(iso, &counter, &WeakPointerCallback);
CHECK(g3s1.IsWeak(iso));
CHECK(g3s2.IsWeak(iso));
g4s1 = Persistent<Object>::New(iso, Object::New());
g4s2 = Persistent<Object>::New(iso, Object::New());
- g4s1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g4s2.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g4s1.MakeWeak(iso, &counter, &WeakPointerCallback);
+ g4s2.MakeWeak(iso, &counter, &WeakPointerCallback);
CHECK(g4s1.IsWeak(iso));
CHECK(g4s2.IsWeak(iso));
}
@@ -2871,7 +2929,7 @@
CHECK_EQ(0, counter.NumberOfWeakCalls());
// Weaken the root.
- root.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ root.MakeWeak(iso, &counter, &WeakPointerCallback);
// Groups are deleted, rebuild groups.
{
@@ -2922,18 +2980,18 @@
HandleScope scope(iso);
g1s1 = Persistent<Object>::New(iso, Object::New());
g1s2 = Persistent<Object>::New(iso, Object::New());
- g1s1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g1s2.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g1s1.MakeWeak(iso, &counter, &WeakPointerCallback);
+ g1s2.MakeWeak(iso, &counter, &WeakPointerCallback);
g2s1 = Persistent<Object>::New(iso, Object::New());
g2s2 = Persistent<Object>::New(iso, Object::New());
- g2s1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g2s2.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g2s1.MakeWeak(iso, &counter, &WeakPointerCallback);
+ g2s2.MakeWeak(iso, &counter, &WeakPointerCallback);
g3s1 = Persistent<Object>::New(iso, Object::New());
g3s2 = Persistent<Object>::New(iso, Object::New());
- g3s1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g3s2.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g3s1.MakeWeak(iso, &counter, &WeakPointerCallback);
+ g3s2.MakeWeak(iso, &counter, &WeakPointerCallback);
}
// Make a root.
@@ -2954,11 +3012,11 @@
Persistent<Value> g2_objects[] = { g2s1, g2s2 };
Persistent<Value> g3_objects[] = { g3s1, g3s2 };
V8::AddObjectGroup(g1_objects, 2);
- g1s1->Set(v8_str("x"), g2s1);
+ g1s1->Set(v8_str("x"), Handle<Object>(*g2s1));
V8::AddObjectGroup(g2_objects, 2);
- g2s1->Set(v8_str("x"), g3s1);
+ g2s1->Set(v8_str("x"), Handle<Object>(*g3s1));
V8::AddObjectGroup(g3_objects, 2);
- g3s1->Set(v8_str("x"), g1s1);
+ g3s1->Set(v8_str("x"), Handle<Object>(*g1s1));
}
HEAP->CollectGarbage(i::NEW_SPACE);
@@ -2967,7 +3025,7 @@
CHECK_EQ(0, counter.NumberOfWeakCalls());
// Weaken the root.
- root.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ root.MakeWeak(iso, &counter, &WeakPointerCallback);
root.MarkPartiallyDependent(iso);
v8::Isolate* isolate = v8::Isolate::GetCurrent();
@@ -2983,11 +3041,11 @@
Persistent<Value> g2_objects[] = { g2s1, g2s2 };
Persistent<Value> g3_objects[] = { g3s1, g3s2 };
V8::AddObjectGroup(g1_objects, 2);
- g1s1->Set(v8_str("x"), g2s1);
+ g1s1->Set(v8_str("x"), Handle<Object>(*g2s1));
V8::AddObjectGroup(g2_objects, 2);
- g2s1->Set(v8_str("x"), g3s1);
+ g2s1->Set(v8_str("x"), Handle<Object>(*g3s1));
V8::AddObjectGroup(g3_objects, 2);
- g3s1->Set(v8_str("x"), g1s1);
+ g3s1->Set(v8_str("x"), Handle<Object>(*g1s1));
}
HEAP->CollectGarbage(i::NEW_SPACE);
@@ -3019,18 +3077,18 @@
HandleScope scope(iso);
g1s1 = Persistent<Object>::New(iso, Object::New());
g1s2 = Persistent<Object>::New(iso, Object::New());
- g1s1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g1s2.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g1s1.MakeWeak(iso, &counter, &WeakPointerCallback);
+ g1s2.MakeWeak(iso, &counter, &WeakPointerCallback);
g2s1 = Persistent<Object>::New(iso, Object::New());
g2s2 = Persistent<Object>::New(iso, Object::New());
- g2s1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g2s2.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g2s1.MakeWeak(iso, &counter, &WeakPointerCallback);
+ g2s2.MakeWeak(iso, &counter, &WeakPointerCallback);
g3s1 = Persistent<Object>::New(iso, Object::New());
g3s2 = Persistent<Object>::New(iso, Object::New());
- g3s1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g3s2.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g3s1.MakeWeak(iso, &counter, &WeakPointerCallback);
+ g3s2.MakeWeak(iso, &counter, &WeakPointerCallback);
}
// Make a root.
@@ -3049,13 +3107,13 @@
g3s2.MarkPartiallyDependent(iso);
iso->SetObjectGroupId(g1s1, UniqueId(1));
iso->SetObjectGroupId(g1s2, UniqueId(1));
- g1s1->Set(v8_str("x"), g2s1);
+ g1s1->Set(v8_str("x"), Local<Value>(*g2s1));
iso->SetObjectGroupId(g2s1, UniqueId(2));
iso->SetObjectGroupId(g2s2, UniqueId(2));
- g2s1->Set(v8_str("x"), g3s1);
+ g2s1->Set(v8_str("x"), Local<Value>(*g3s1));
iso->SetObjectGroupId(g3s1, UniqueId(3));
iso->SetObjectGroupId(g3s2, UniqueId(3));
- g3s1->Set(v8_str("x"), g1s1);
+ g3s1->Set(v8_str("x"), Local<Value>(*g1s1));
}
v8::internal::Heap* heap = reinterpret_cast<v8::internal::Isolate*>(
@@ -3066,7 +3124,7 @@
CHECK_EQ(0, counter.NumberOfWeakCalls());
// Weaken the root.
- root.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ root.MakeWeak(iso, &counter, &WeakPointerCallback);
root.MarkPartiallyDependent(iso);
v8::Isolate* isolate = v8::Isolate::GetCurrent();
@@ -3080,13 +3138,13 @@
g3s2.MarkPartiallyDependent(isolate);
iso->SetObjectGroupId(g1s1, UniqueId(1));
iso->SetObjectGroupId(g1s2, UniqueId(1));
- g1s1->Set(v8_str("x"), g2s1);
+ g1s1->Set(v8_str("x"), Local<Value>(*g2s1));
iso->SetObjectGroupId(g2s1, UniqueId(2));
iso->SetObjectGroupId(g2s2, UniqueId(2));
- g2s1->Set(v8_str("x"), g3s1);
+ g2s1->Set(v8_str("x"), Local<Value>(*g3s1));
iso->SetObjectGroupId(g3s1, UniqueId(3));
iso->SetObjectGroupId(g3s2, UniqueId(3));
- g3s1->Set(v8_str("x"), g1s1);
+ g3s1->Set(v8_str("x"), Local<Value>(*g1s1));
}
heap->CollectGarbage(i::NEW_SPACE);
@@ -4540,7 +4598,7 @@
for (int i = 0; i < 10; i++) {
CHECK(xValue.IsEmpty());
script->Run();
- CHECK_EQ(v8_num(4), xValue);
+ CHECK_EQ(v8_num(4), Handle<Value>(*xValue));
xValue.Dispose(context->GetIsolate());
xValue = v8::Persistent<Value>();
}
@@ -4557,7 +4615,7 @@
for (int i = 0; i < 10; i++) {
CHECK(xValue.IsEmpty());
script->Run();
- CHECK_EQ(v8_num(4), xValue);
+ CHECK_EQ(v8_num(4), Handle<Value>(*xValue));
xValue.Dispose(context->GetIsolate());
xValue = v8::Persistent<Value>();
}
@@ -4630,9 +4688,9 @@
THREADED_TEST(NamedInterceptorDictionaryICMultipleContext) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
-
- v8::Persistent<Context> context1 = Context::New();
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope scope(isolate);
+ v8::Local<Context> context1 = Context::New(isolate);
context1->Enter();
Local<ObjectTemplate> templ = ObjectTemplate::New();
@@ -4667,8 +4725,6 @@
context1->Enter();
CompileRun("var obj = { x : 0 }; delete obj.x;");
context1->Exit();
-
- context1.Dispose(context1->GetIsolate());
}
@@ -5443,14 +5499,14 @@
THREADED_TEST(GlobalObjectTemplate) {
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope handle_scope(isolate);
Local<ObjectTemplate> global_template = ObjectTemplate::New();
global_template->Set(v8_str("JSNI_Log"),
v8::FunctionTemplate::New(HandleLogDelegator));
- v8::Persistent<Context> context = Context::New(0, global_template);
+ v8::Local<Context> context = Context::New(isolate, 0, global_template);
Context::Scope context_scope(context);
Script::Compile(v8_str("JSNI_Log('LOG')"))->Run();
- context.Dispose(context->GetIsolate());
}
@@ -5465,7 +5521,8 @@
v8::RegisterExtension(new Extension("simpletest", kSimpleExtensionSource));
const char* extension_names[] = { "simpletest" };
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context = Context::New(&extensions);
+ v8::Handle<Context> context =
+ Context::New(v8::Isolate::GetCurrent(), &extensions);
Context::Scope lock(context);
v8::Handle<Value> result = Script::Compile(v8_str("Foo()"))->Run();
CHECK_EQ(result, v8::Integer::New(4));
@@ -5477,7 +5534,8 @@
v8::RegisterExtension(new Extension("nulltest", NULL));
const char* extension_names[] = { "nulltest" };
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context = Context::New(&extensions);
+ v8::Handle<Context> context =
+ Context::New(v8::Isolate::GetCurrent(), &extensions);
Context::Scope lock(context);
v8::Handle<Value> result = Script::Compile(v8_str("1+3"))->Run();
CHECK_EQ(result, v8::Integer::New(4));
@@ -5496,7 +5554,8 @@
kEmbeddedExtensionSource));
const char* extension_names[] = { "srclentest_fail" };
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context = Context::New(&extensions);
+ v8::Handle<Context> context =
+ Context::New(v8::Isolate::GetCurrent(), &extensions);
CHECK_EQ(0, *context);
}
@@ -5512,7 +5571,8 @@
source_len));
const char* extension_names[1] = { extension_name.start() };
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context = Context::New(&extensions);
+ v8::Handle<Context> context =
+ Context::New(v8::Isolate::GetCurrent(), &extensions);
if (source_len == kEmbeddedExtensionSourceValidLen) {
Context::Scope lock(context);
v8::Handle<Value> result = Script::Compile(v8_str("Ret54321()"))->Run();
@@ -5548,7 +5608,8 @@
v8::RegisterExtension(new Extension("evaltest2", kEvalExtensionSource2));
const char* extension_names[] = { "evaltest1", "evaltest2" };
v8::ExtensionConfiguration extensions(2, extension_names);
- v8::Handle<Context> context = Context::New(&extensions);
+ v8::Handle<Context> context =
+ Context::New(v8::Isolate::GetCurrent(), &extensions);
Context::Scope lock(context);
v8::Handle<Value> result = Script::Compile(v8_str("UseEval1()"))->Run();
CHECK_EQ(result, v8::Integer::New(42));
@@ -5581,7 +5642,8 @@
v8::RegisterExtension(new Extension("withtest2", kWithExtensionSource2));
const char* extension_names[] = { "withtest1", "withtest2" };
v8::ExtensionConfiguration extensions(2, extension_names);
- v8::Handle<Context> context = Context::New(&extensions);
+ v8::Handle<Context> context =
+ Context::New(v8::Isolate::GetCurrent(), &extensions);
Context::Scope lock(context);
v8::Handle<Value> result = Script::Compile(v8_str("UseWith1()"))->Run();
CHECK_EQ(result, v8::Integer::New(87));
@@ -5595,7 +5657,8 @@
Extension* extension = new Extension("autotest", kSimpleExtensionSource);
extension->set_auto_enable(true);
v8::RegisterExtension(extension);
- v8::Handle<Context> context = Context::New();
+ v8::Handle<Context> context =
+ Context::New(v8::Isolate::GetCurrent());
Context::Scope lock(context);
v8::Handle<Value> result = Script::Compile(v8_str("Foo()"))->Run();
CHECK_EQ(result, v8::Integer::New(4));
@@ -5614,7 +5677,8 @@
kSyntaxErrorInExtensionSource));
const char* extension_names[] = { "syntaxerror" };
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context = Context::New(&extensions);
+ v8::Handle<Context> context =
+ Context::New(v8::Isolate::GetCurrent(), &extensions);
CHECK(context.IsEmpty());
}
@@ -5631,7 +5695,8 @@
kExceptionInExtensionSource));
const char* extension_names[] = { "exception" };
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context = Context::New(&extensions);
+ v8::Handle<Context> context =
+ Context::New(v8::Isolate::GetCurrent(), &extensions);
CHECK(context.IsEmpty());
}
@@ -5652,7 +5717,8 @@
kNativeCallInExtensionSource));
const char* extension_names[] = { "nativecall" };
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context = Context::New(&extensions);
+ v8::Handle<Context> context =
+ Context::New(v8::Isolate::GetCurrent(), &extensions);
Context::Scope lock(context);
v8::Handle<Value> result = Script::Compile(v8_str(kNativeCallTest))->Run();
CHECK_EQ(result, v8::Integer::New(3));
@@ -5688,7 +5754,8 @@
"native function foo();"));
const char* extension_names[] = { name };
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context = Context::New(&extensions);
+ v8::Handle<Context> context =
+ Context::New(v8::Isolate::GetCurrent(), &extensions);
Context::Scope lock(context);
v8::Handle<Value> result = Script::Compile(v8_str("foo(42);"))->Run();
CHECK_EQ(result, v8::Integer::New(42));
@@ -5703,7 +5770,8 @@
"native\nfunction foo();"));
const char* extension_names[] = { name };
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context(Context::New(&extensions));
+ v8::Handle<Context> context =
+ Context::New(v8::Isolate::GetCurrent(), &extensions);
CHECK(context.IsEmpty());
}
@@ -5718,7 +5786,8 @@
"nativ\\u0065 function foo();"));
const char* extension_names[] = { name };
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context(Context::New(&extensions));
+ v8::Handle<Context> context =
+ Context::New(v8::Isolate::GetCurrent(), &extensions);
CHECK(context.IsEmpty());
}
@@ -5858,7 +5927,8 @@
v8::RegisterExtension(new Extension("B", "", 1, bDeps));
last_location = NULL;
v8::ExtensionConfiguration config(1, bDeps);
- v8::Handle<Context> context = Context::New(&config);
+ v8::Handle<Context> context =
+ Context::New(v8::Isolate::GetCurrent(), &config);
CHECK(context.IsEmpty());
CHECK_NE(last_location, NULL);
}
@@ -5938,11 +6008,10 @@
};
static void HandleWeakReference(v8::Isolate* isolate,
- v8::Persistent<v8::Value> obj,
- void* data) {
- Snorkel* snorkel = reinterpret_cast<Snorkel*>(data);
+ v8::Persistent<v8::Value>* obj,
+ Snorkel* snorkel) {
delete snorkel;
- obj.ClearWeak(isolate);
+ obj->ClearWeak(isolate);
}
v8::Handle<Value> WhammyPropertyGetter(Local<String> name,
@@ -5957,7 +6026,9 @@
v8::Persistent<v8::Object>::New(info.GetIsolate(), obj);
if (!prev.IsEmpty()) {
prev->Set(v8_str("next"), obj);
- prev.MakeWeak(info.GetIsolate(), new Snorkel(), &HandleWeakReference);
+ prev.MakeWeak<Value, Snorkel>(info.GetIsolate(),
+ new Snorkel(),
+ &HandleWeakReference);
whammy->objects_[whammy->cursor_].Clear();
}
whammy->objects_[whammy->cursor_] = global;
@@ -5974,7 +6045,8 @@
v8::External::New(whammy));
const char* extension_list[] = { "v8/gc" };
v8::ExtensionConfiguration extensions(1, extension_list);
- v8::Persistent<Context> context = Context::New(&extensions);
+ v8::Handle<Context> context =
+ Context::New(v8::Isolate::GetCurrent(), &extensions);
Context::Scope context_scope(context);
v8::Handle<v8::Object> interceptor = templ->NewInstance();
@@ -5991,22 +6063,21 @@
v8::Handle<Value> result = CompileRun(code);
CHECK_EQ(4.0, result->NumberValue());
delete whammy;
- context.Dispose(context->GetIsolate());
}
static void DisposeAndSetFlag(v8::Isolate* isolate,
- v8::Persistent<v8::Value> obj,
- void* data) {
- obj.Dispose(isolate);
- obj.Clear();
- *(reinterpret_cast<bool*>(data)) = true;
+ v8::Persistent<v8::Object>* obj,
+ bool* data) {
+ obj->Dispose(isolate);
+ *(data) = true;
}
THREADED_TEST(IndependentWeakHandle) {
- v8::Persistent<Context> context = Context::New();
- v8::Isolate* iso = context->GetIsolate();
+ v8::Isolate* iso = v8::Isolate::GetCurrent();
+ v8::HandleScope scope(iso);
+ v8::Handle<Context> context = Context::New(iso);
Context::Scope context_scope(context);
v8::Persistent<v8::Object> object_a, object_b;
@@ -6042,32 +6113,32 @@
static void ForceScavenge(v8::Isolate* isolate,
- v8::Persistent<v8::Value> obj,
- void* data) {
- obj.Dispose(isolate);
- obj.Clear();
- *(reinterpret_cast<bool*>(data)) = true;
+ v8::Persistent<v8::Object>* obj,
+ bool* data) {
+ obj->Dispose(isolate);
+ *(data) = true;
InvokeScavenge();
}
static void ForceMarkSweep(v8::Isolate* isolate,
- v8::Persistent<v8::Value> obj,
- void* data) {
- obj.Dispose(isolate);
- obj.Clear();
- *(reinterpret_cast<bool*>(data)) = true;
+ v8::Persistent<v8::Object>* obj,
+ bool* data) {
+ obj->Dispose(isolate);
+ *(data) = true;
InvokeMarkSweep();
}
THREADED_TEST(GCFromWeakCallbacks) {
- v8::Persistent<Context> context = Context::New();
- v8::Isolate* isolate = context->GetIsolate();
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope scope(isolate);
+ v8::Handle<Context> context = Context::New(isolate);
Context::Scope context_scope(context);
static const int kNumberOfGCTypes = 2;
- v8::NearDeathCallback gc_forcing_callback[kNumberOfGCTypes] =
+ typedef v8::WeakReferenceCallbacks<v8::Object, bool>::Revivable Callback;
+ Callback gc_forcing_callback[kNumberOfGCTypes] =
{&ForceScavenge, &ForceMarkSweep};
typedef void (*GCInvoker)();
@@ -6091,17 +6162,18 @@
static void RevivingCallback(v8::Isolate* isolate,
- v8::Persistent<v8::Value> obj,
- void* data) {
- obj.ClearWeak(isolate);
- *(reinterpret_cast<bool*>(data)) = true;
+ v8::Persistent<v8::Object>* obj,
+ bool* data) {
+ obj->ClearWeak(isolate);
+ *(data) = true;
}
THREADED_TEST(IndependentHandleRevival) {
- v8::Persistent<Context> context = Context::New();
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope scope(isolate);
+ v8::Handle<Context> context = Context::New(isolate);
Context::Scope context_scope(context);
- v8::Isolate* isolate = context->GetIsolate();
v8::Persistent<v8::Object> object;
{
@@ -6542,7 +6614,7 @@
memset(buf, 0x1, sizeof(buf));
memset(wbuf, 0x1, sizeof(wbuf));
- len = str->WriteAscii(buf);
+ len = str->WriteOneByte(reinterpret_cast<uint8_t*>(buf));
CHECK_EQ(5, len);
len = str->Write(wbuf);
CHECK_EQ(5, len);
@@ -6552,7 +6624,7 @@
memset(buf, 0x1, sizeof(buf));
memset(wbuf, 0x1, sizeof(wbuf));
- len = str->WriteAscii(buf, 0, 4);
+ len = str->WriteOneByte(reinterpret_cast<uint8_t*>(buf), 0, 4);
CHECK_EQ(4, len);
len = str->Write(wbuf, 0, 4);
CHECK_EQ(4, len);
@@ -6562,7 +6634,7 @@
memset(buf, 0x1, sizeof(buf));
memset(wbuf, 0x1, sizeof(wbuf));
- len = str->WriteAscii(buf, 0, 5);
+ len = str->WriteOneByte(reinterpret_cast<uint8_t*>(buf), 0, 5);
CHECK_EQ(5, len);
len = str->Write(wbuf, 0, 5);
CHECK_EQ(5, len);
@@ -6572,7 +6644,7 @@
memset(buf, 0x1, sizeof(buf));
memset(wbuf, 0x1, sizeof(wbuf));
- len = str->WriteAscii(buf, 0, 6);
+ len = str->WriteOneByte(reinterpret_cast<uint8_t*>(buf), 0, 6);
CHECK_EQ(5, len);
len = str->Write(wbuf, 0, 6);
CHECK_EQ(5, len);
@@ -6582,7 +6654,7 @@
memset(buf, 0x1, sizeof(buf));
memset(wbuf, 0x1, sizeof(wbuf));
- len = str->WriteAscii(buf, 4, -1);
+ len = str->WriteOneByte(reinterpret_cast<uint8_t*>(buf), 4, -1);
CHECK_EQ(1, len);
len = str->Write(wbuf, 4, -1);
CHECK_EQ(1, len);
@@ -6592,7 +6664,7 @@
memset(buf, 0x1, sizeof(buf));
memset(wbuf, 0x1, sizeof(wbuf));
- len = str->WriteAscii(buf, 4, 6);
+ len = str->WriteOneByte(reinterpret_cast<uint8_t*>(buf), 4, 6);
CHECK_EQ(1, len);
len = str->Write(wbuf, 4, 6);
CHECK_EQ(1, len);
@@ -6601,7 +6673,7 @@
memset(buf, 0x1, sizeof(buf));
memset(wbuf, 0x1, sizeof(wbuf));
- len = str->WriteAscii(buf, 4, 1);
+ len = str->WriteOneByte(reinterpret_cast<uint8_t*>(buf), 4, 1);
CHECK_EQ(1, len);
len = str->Write(wbuf, 4, 1);
CHECK_EQ(1, len);
@@ -6611,7 +6683,7 @@
memset(buf, 0x1, sizeof(buf));
memset(wbuf, 0x1, sizeof(wbuf));
- len = str->WriteAscii(buf, 3, 1);
+ len = str->WriteOneByte(reinterpret_cast<uint8_t*>(buf), 3, 1);
CHECK_EQ(1, len);
len = str->Write(wbuf, 3, 1);
CHECK_EQ(1, len);
@@ -6633,7 +6705,10 @@
memset(buf, 0x1, sizeof(buf));
buf[5] = 'X';
- len = str->WriteAscii(buf, 0, 6, String::NO_NULL_TERMINATION);
+ len = str->WriteOneByte(reinterpret_cast<uint8_t*>(buf),
+ 0,
+ 6,
+ String::NO_NULL_TERMINATION);
CHECK_EQ(5, len);
CHECK_EQ('X', buf[5]);
CHECK_EQ(0, strncmp("abcde", buf, 5));
@@ -6664,18 +6739,13 @@
CHECK_EQ(0, strcmp(utf8buf, "abcde"));
memset(buf, 0x1, sizeof(buf));
- len = str3->WriteAscii(buf);
- CHECK_EQ(7, len);
- CHECK_EQ(0, strcmp("abc def", buf));
-
- memset(buf, 0x1, sizeof(buf));
- len = str3->WriteAscii(buf, 0, -1, String::PRESERVE_ASCII_NULL);
+ len = str3->WriteOneByte(reinterpret_cast<uint8_t*>(buf));
CHECK_EQ(7, len);
CHECK_EQ(0, strcmp("abc", buf));
CHECK_EQ(0, buf[3]);
CHECK_EQ(0, strcmp("def", buf + 4));
- CHECK_EQ(0, str->WriteAscii(NULL, 0, 0, String::NO_NULL_TERMINATION));
+ CHECK_EQ(0, str->WriteOneByte(NULL, 0, 0, String::NO_NULL_TERMINATION));
CHECK_EQ(0, str->WriteUtf8(NULL, 0, 0, String::NO_NULL_TERMINATION));
CHECK_EQ(0, str->Write(NULL, 0, 0, String::NO_NULL_TERMINATION));
}
@@ -7156,8 +7226,8 @@
global_template->SetAccessCheckCallbacks(NamedSecurityTestCallback,
IndexedSecurityTestCallback);
// Create an environment
- v8::Persistent<Context> context0 =
- Context::New(NULL, global_template);
+ v8::Handle<Context> context0 =
+ Context::New(v8::Isolate::GetCurrent(), NULL, global_template);
context0->Enter();
v8::Handle<v8::Object> global0 = context0->Global();
@@ -7172,8 +7242,8 @@
// Create another environment, should fail security checks.
v8::HandleScope scope1(v8::Isolate::GetCurrent());
- v8::Persistent<Context> context1 =
- Context::New(NULL, global_template);
+ v8::Handle<Context> context1 =
+ Context::New(v8::Isolate::GetCurrent(), NULL, global_template);
context1->Enter();
v8::Handle<v8::Object> global1 = context1->Global();
@@ -7205,17 +7275,14 @@
}
context1->Exit();
- context1.Dispose(context1->GetIsolate());
-
context0->Exit();
- context0.Dispose(context0->GetIsolate());
}
THREADED_TEST(SecurityChecks) {
LocalContext env1;
v8::HandleScope handle_scope(env1->GetIsolate());
- v8::Persistent<Context> env2 = Context::New();
+ v8::Handle<Context> env2 = Context::New(env1->GetIsolate());
Local<Value> foo = v8_str("foo");
Local<Value> bar = v8_str("bar");
@@ -7251,8 +7318,6 @@
Function::Cast(*spy2)->Call(env2->Global(), 0, NULL);
CHECK(try_catch.HasCaught());
}
-
- env2.Dispose(env2->GetIsolate());
}
@@ -7260,7 +7325,7 @@
THREADED_TEST(SecurityChecksForPrototypeChain) {
LocalContext current;
v8::HandleScope scope(current->GetIsolate());
- v8::Persistent<Context> other = Context::New();
+ v8::Handle<Context> other = Context::New(current->GetIsolate());
// Change context to be able to get to the Object function in the
// other context without hitting the security checks.
@@ -7321,14 +7386,13 @@
CHECK(!access_f3->Run()->Equals(v8_num(101)));
CHECK(access_f3->Run()->IsUndefined());
}
- other.Dispose(other->GetIsolate());
}
THREADED_TEST(CrossDomainDelete) {
LocalContext env1;
v8::HandleScope handle_scope(env1->GetIsolate());
- v8::Persistent<Context> env2 = Context::New();
+ v8::Handle<Context> env2 = Context::New(env1->GetIsolate());
Local<Value> foo = v8_str("foo");
Local<Value> bar = v8_str("bar");
@@ -7353,15 +7417,13 @@
Local<Value> v = env1->Global()->Get(v8_str("prop"));
CHECK(v->IsNumber());
CHECK_EQ(3, v->Int32Value());
-
- env2.Dispose(env2->GetIsolate());
}
THREADED_TEST(CrossDomainIsPropertyEnumerable) {
LocalContext env1;
v8::HandleScope handle_scope(env1->GetIsolate());
- v8::Persistent<Context> env2 = Context::New();
+ v8::Handle<Context> env2 = Context::New(env1->GetIsolate());
Local<Value> foo = v8_str("foo");
Local<Value> bar = v8_str("bar");
@@ -7388,15 +7450,13 @@
Local<Value> result = Script::Compile(test)->Run();
CHECK(result->IsFalse());
}
-
- env2.Dispose(env2->GetIsolate());
}
THREADED_TEST(CrossDomainForIn) {
LocalContext env1;
v8::HandleScope handle_scope(env1->GetIsolate());
- v8::Persistent<Context> env2 = Context::New();
+ v8::Handle<Context> env2 = Context::New(env1->GetIsolate());
Local<Value> foo = v8_str("foo");
Local<Value> bar = v8_str("bar");
@@ -7422,14 +7482,13 @@
"return true;})()");
CHECK(result->IsTrue());
}
- env2.Dispose(env2->GetIsolate());
}
TEST(ContextDetachGlobal) {
LocalContext env1;
v8::HandleScope handle_scope(env1->GetIsolate());
- v8::Persistent<Context> env2 = Context::New();
+ v8::Handle<Context> env2 = Context::New(env1->GetIsolate());
Local<v8::Object> global1 = env1->Global();
@@ -7456,8 +7515,10 @@
// env2 has a new global object.
CHECK(!env2->Global()->Equals(global2));
- v8::Persistent<Context> env3 =
- Context::New(0, v8::Handle<v8::ObjectTemplate>(), global2);
+ v8::Handle<Context> env3 = Context::New(env1->GetIsolate(),
+ 0,
+ v8::Handle<v8::ObjectTemplate>(),
+ global2);
env3->SetSecurityToken(v8_str("bar"));
env3->Enter();
@@ -7484,9 +7545,6 @@
Local<Value> r = global3->Get(v8_str("prop2"));
CHECK(r->IsUndefined());
}
-
- env2.Dispose(env2->GetIsolate());
- env3.Dispose(env3->GetIsolate());
}
@@ -7495,7 +7553,7 @@
v8::HandleScope scope(env1->GetIsolate());
// Create second environment.
- v8::Persistent<Context> env2 = Context::New();
+ v8::Handle<Context> env2 = Context::New(env1->GetIsolate());
Local<Value> foo = v8_str("foo");
@@ -7527,8 +7585,10 @@
CHECK(result->IsUndefined());
// Reuse global2 for env3.
- v8::Persistent<Context> env3 =
- Context::New(0, v8::Handle<v8::ObjectTemplate>(), global2);
+ v8::Handle<Context> env3 = Context::New(env1->GetIsolate(),
+ 0,
+ v8::Handle<v8::ObjectTemplate>(),
+ global2);
CHECK_EQ(global2, env3->Global());
// Start by using the same security token for env3 as for env1 and env2.
@@ -7563,9 +7623,6 @@
result = CompileRun("other.p");
CHECK(result->IsInt32());
CHECK_EQ(42, result->Int32Value());
-
- env2.Dispose(env2->GetIsolate());
- env3.Dispose(env3->GetIsolate());
}
@@ -7617,7 +7674,8 @@
TEST(AccessControl) {
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope handle_scope(isolate);
v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
global_template->SetAccessCheckCallbacks(NamedAccessBlocker,
@@ -7637,7 +7695,7 @@
v8::DEFAULT);
// Create an environment
- v8::Persistent<Context> context0 = Context::New(NULL, global_template);
+ v8::Local<Context> context0 = Context::New(isolate, NULL, global_template);
context0->Enter();
v8::Handle<v8::Object> global0 = context0->Global();
@@ -7663,9 +7721,9 @@
Local<Value> el_getter = global0->Get(v8_str("el_getter"));
Local<Value> el_setter = global0->Get(v8_str("el_setter"));
- v8::HandleScope scope1(v8::Isolate::GetCurrent());
+ v8::HandleScope scope1(isolate);
- v8::Persistent<Context> context1 = Context::New();
+ v8::Local<Context> context1 = Context::New(isolate);
context1->Enter();
v8::Handle<v8::Object> global1 = context1->Global();
@@ -7855,13 +7913,12 @@
context1->Exit();
context0->Exit();
- context1.Dispose(context1->GetIsolate());
- context0.Dispose(context0->GetIsolate());
}
TEST(AccessControlES5) {
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope handle_scope(isolate);
v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
global_template->SetAccessCheckCallbacks(NamedAccessBlocker,
@@ -7882,12 +7939,12 @@
v8::DEFAULT);
// Create an environment
- v8::Persistent<Context> context0 = Context::New(NULL, global_template);
+ v8::Local<Context> context0 = Context::New(isolate, NULL, global_template);
context0->Enter();
v8::Handle<v8::Object> global0 = context0->Global();
- v8::Persistent<Context> context1 = Context::New();
+ v8::Local<Context> context1 = Context::New(isolate);
context1->Enter();
v8::Handle<v8::Object> global1 = context1->Global();
global1->Set(v8_str("other"), global0);
@@ -7947,7 +8004,8 @@
THREADED_TEST(AccessControlGetOwnPropertyNames) {
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope handle_scope(isolate);
v8::Handle<v8::ObjectTemplate> obj_template = v8::ObjectTemplate::New();
obj_template->Set(v8_str("x"), v8::Integer::New(42));
@@ -7955,14 +8013,14 @@
GetOwnPropertyNamesIndexedBlocker);
// Create an environment
- v8::Persistent<Context> context0 = Context::New(NULL, obj_template);
+ v8::Local<Context> context0 = Context::New(isolate, NULL, obj_template);
context0->Enter();
v8::Handle<v8::Object> global0 = context0->Global();
v8::HandleScope scope1(v8::Isolate::GetCurrent());
- v8::Persistent<Context> context1 = Context::New();
+ v8::Local<Context> context1 = Context::New(isolate);
context1->Enter();
v8::Handle<v8::Object> global1 = context1->Global();
@@ -7984,8 +8042,6 @@
context1->Exit();
context0->Exit();
- context1.Dispose(context1->GetIsolate());
- context0.Dispose(context0->GetIsolate());
}
@@ -8041,7 +8097,8 @@
THREADED_TEST(CrossDomainAccessors) {
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope handle_scope(isolate);
v8::Handle<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New();
@@ -8063,7 +8120,7 @@
v8::Handle<Value>(),
v8::DEFAULT);
- v8::Persistent<Context> context0 = Context::New(NULL, global_template);
+ v8::Local<Context> context0 = Context::New(isolate, NULL, global_template);
context0->Enter();
Local<v8::Object> global = context0->Global();
@@ -8072,7 +8129,7 @@
// Enter a new context.
v8::HandleScope scope1(v8::Isolate::GetCurrent());
- v8::Persistent<Context> context1 = Context::New();
+ v8::Local<Context> context1 = Context::New(isolate);
context1->Enter();
v8::Handle<v8::Object> global1 = context1->Global();
@@ -8088,8 +8145,6 @@
context1->Exit();
context0->Exit();
- context1.Dispose(context1->GetIsolate());
- context0.Dispose(context0->GetIsolate());
}
@@ -8119,10 +8174,11 @@
named_access_count = 0;
indexed_access_count = 0;
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope handle_scope(isolate);
// Create an environment.
- v8::Persistent<Context> context0 = Context::New();
+ v8::Local<Context> context0 = Context::New(isolate);
context0->Enter();
// Create an object that requires access-check functions to be
@@ -8132,10 +8188,10 @@
IndexedAccessCounter);
Local<v8::Object> object = object_template->NewInstance();
- v8::HandleScope scope1(v8::Isolate::GetCurrent());
+ v8::HandleScope scope1(isolate);
// Create another environment.
- v8::Persistent<Context> context1 = Context::New();
+ v8::Local<Context> context1 = Context::New(isolate);
context1->Enter();
// Make easy access to the object from the other environment.
@@ -8223,8 +8279,6 @@
context1->Exit();
context0->Exit();
- context1.Dispose(context1->GetIsolate());
- context0.Dispose(context0->GetIsolate());
}
@@ -8238,7 +8292,7 @@
CHECK(name->IsString());
memset(buf, 0x1, sizeof(buf));
- len = name.As<String>()->WriteAscii(buf);
+ len = name.As<String>()->WriteOneByte(reinterpret_cast<uint8_t*>(buf));
CHECK_EQ(4, len);
uint16_t buf2[100];
@@ -8268,10 +8322,11 @@
named_access_count = 0;
indexed_access_count = 0;
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope handle_scope(isolate);
// Create an environment.
- v8::Persistent<Context> context0 = Context::New();
+ v8::Local<Context> context0 = Context::New(isolate);
context0->Enter();
// Create an object that requires access-check functions to be
@@ -8281,10 +8336,10 @@
IndexedAccessFlatten);
Local<v8::Object> object = object_template->NewInstance();
- v8::HandleScope scope1(v8::Isolate::GetCurrent());
+ v8::HandleScope scope1(isolate);
// Create another environment.
- v8::Persistent<Context> context1 = Context::New();
+ v8::Local<Context> context1 = Context::New(isolate);
context1->Enter();
// Make easy access to the object from the other environment.
@@ -8298,8 +8353,6 @@
context1->Exit();
context0->Exit();
- context1.Dispose(context1->GetIsolate());
- context0.Dispose(context0->GetIsolate());
}
@@ -8332,10 +8385,11 @@
named_access_count = 0;
indexed_access_count = 0;
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope handle_scope(isolate);
// Create an environment.
- v8::Persistent<Context> context0 = Context::New();
+ v8::Local<Context> context0 = Context::New(isolate);
context0->Enter();
// Create an object that requires access-check functions to be
@@ -8350,10 +8404,10 @@
AccessControlIndexedSetter);
Local<v8::Object> object = object_template->NewInstance();
- v8::HandleScope scope1(v8::Isolate::GetCurrent());
+ v8::HandleScope scope1(isolate);
// Create another environment.
- v8::Persistent<Context> context1 = Context::New();
+ v8::Local<Context> context1 = Context::New(isolate);
context1->Enter();
// Make easy access to the object from the other environment.
@@ -8390,8 +8444,6 @@
context1->Exit();
context0->Exit();
- context1.Dispose(context1->GetIsolate());
- context0.Dispose(context0->GetIsolate());
}
@@ -9252,10 +9304,11 @@
// its global throws an exception. This behavior is consistent with
// other JavaScript implementations.
THREADED_TEST(EvalInDetachedGlobal) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope scope(isolate);
- v8::Persistent<Context> context0 = Context::New();
- v8::Persistent<Context> context1 = Context::New();
+ v8::Local<Context> context0 = Context::New(isolate);
+ v8::Local<Context> context1 = Context::New(isolate);
// Set up function in context0 that uses eval from context0.
context0->Enter();
@@ -9280,9 +9333,6 @@
CHECK(x_value.IsEmpty());
CHECK(catcher.HasCaught());
context1->Exit();
-
- context1.Dispose(context1->GetIsolate());
- context0.Dispose(context0->GetIsolate());
}
@@ -9634,7 +9684,7 @@
const AccessorInfo& info) {
ApiTestFuzzer::Fuzz();
return v8_str("x")->Equals(name)
- ? v8::Integer::New(42) : v8::Handle<v8::Value>();
+ ? v8::Handle<v8::Value>(v8::Integer::New(42)) : v8::Handle<v8::Value>();
}
@@ -11803,11 +11853,11 @@
v8::Persistent<v8::Object> bad_handle;
void NewPersistentHandleCallback(v8::Isolate* isolate,
- v8::Persistent<v8::Value> handle,
+ v8::Persistent<v8::Value>* handle,
void*) {
v8::HandleScope scope(isolate);
bad_handle = v8::Persistent<v8::Object>::New(isolate, some_object);
- handle.Dispose(isolate);
+ handle->Dispose(isolate);
}
@@ -11826,7 +11876,9 @@
// global handle nodes are processed by PostGarbageCollectionProcessing
// in reverse allocation order, so if second allocated handle is deleted,
// weak callback of the first handle would be able to 'reallocate' it.
- handle1.MakeWeak(isolate, NULL, NewPersistentHandleCallback);
+ handle1.MakeWeak<v8::Value, void>(isolate,
+ NULL,
+ NewPersistentHandleCallback);
handle2.Dispose(isolate);
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
}
@@ -11835,11 +11887,11 @@
v8::Persistent<v8::Object> to_be_disposed;
void DisposeAndForceGcCallback(v8::Isolate* isolate,
- v8::Persistent<v8::Value> handle,
+ v8::Persistent<v8::Value>* handle,
void*) {
to_be_disposed.Dispose(isolate);
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
- handle.Dispose(isolate);
+ handle->Dispose(isolate);
}
@@ -11853,23 +11905,23 @@
handle1 = v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
handle2 = v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
}
- handle1.MakeWeak(isolate, NULL, DisposeAndForceGcCallback);
+ handle1.MakeWeak<v8::Value, void>(isolate, NULL, DisposeAndForceGcCallback);
to_be_disposed = handle2;
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
}
void DisposingCallback(v8::Isolate* isolate,
- v8::Persistent<v8::Value> handle,
+ v8::Persistent<v8::Value>* handle,
void*) {
- handle.Dispose(isolate);
+ handle->Dispose(isolate);
}
void HandleCreatingCallback(v8::Isolate* isolate,
- v8::Persistent<v8::Value> handle,
+ v8::Persistent<v8::Value>* handle,
void*) {
v8::HandleScope scope(isolate);
v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
- handle.Dispose(isolate);
+ handle->Dispose(isolate);
}
@@ -11884,8 +11936,8 @@
handle2 = v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
handle1 = v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
}
- handle2.MakeWeak(isolate, NULL, DisposingCallback);
- handle3.MakeWeak(isolate, NULL, HandleCreatingCallback);
+ handle2.MakeWeak<v8::Value, void>(isolate, NULL, DisposingCallback);
+ handle3.MakeWeak<v8::Value, void>(isolate, NULL, HandleCreatingCallback);
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
}
@@ -11913,7 +11965,7 @@
}
-static v8::Handle<Value> NestedScope(v8::Persistent<Context> env) {
+static v8::Handle<Value> NestedScope(v8::Local<Context> env) {
v8::HandleScope inner(env->GetIsolate());
env->Enter();
v8::Handle<Value> three = v8_num(3);
@@ -11924,14 +11976,14 @@
THREADED_TEST(NestedHandleScopeAndContexts) {
- v8::HandleScope outer(v8::Isolate::GetCurrent());
- v8::Persistent<Context> env = Context::New();
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope outer(isolate);
+ v8::Local<Context> env = Context::New(isolate);
env->Enter();
v8::Handle<Value> value = NestedScope(env);
v8::Handle<String> str(value->ToString());
CHECK(!str.IsEmpty());
env->Exit();
- env.Dispose(env->GetIsolate());
}
@@ -11963,8 +12015,9 @@
bar_ptr = NULL;
foo_ptr = NULL;
- v8::HandleScope outer(v8::Isolate::GetCurrent());
- v8::Persistent<Context> env = Context::New();
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope outer(isolate);
+ v8::Local<Context> env = Context::New(isolate);
env->Enter();
const char* script =
@@ -12290,11 +12343,11 @@
THREADED_TEST(ExternalAllocatedMemory) {
- v8::HandleScope outer(v8::Isolate::GetCurrent());
- v8::Persistent<Context> env(Context::New());
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope outer(isolate);
+ v8::Local<Context> env(Context::New(isolate));
CHECK(!env.IsEmpty());
const intptr_t kSize = 1024*1024;
- v8::Isolate* isolate = env->GetIsolate();
int64_t baseline = cast(isolate->AdjustAmountOfExternalAllocatedMemory(0));
CHECK_EQ(baseline + cast(kSize),
cast(isolate->AdjustAmountOfExternalAllocatedMemory(kSize)));
@@ -12305,8 +12358,14 @@
THREADED_TEST(DisposeEnteredContext) {
LocalContext outer;
- v8::HandleScope scope(outer->GetIsolate());
- { v8::Persistent<v8::Context> inner = v8::Context::New();
+ v8::Isolate* isolate = outer->GetIsolate();
+ v8::Persistent<v8::Context> inner;
+ {
+ v8::HandleScope scope(isolate);
+ inner.Reset(isolate, v8::Context::New(isolate));
+ }
+ v8::HandleScope scope(isolate);
+ {
inner->Enter();
inner.Dispose(inner->GetIsolate());
inner.Clear();
@@ -12626,7 +12685,8 @@
// This tests that access check information remains on the global
// object template when creating contexts.
THREADED_TEST(AccessControlRepeatedContextCreation) {
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope handle_scope(isolate);
v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
global_template->SetAccessCheckCallbacks(NamedSetAccessBlocker,
IndexedSetAccessBlocker);
@@ -12636,14 +12696,15 @@
i::Handle<i::FunctionTemplateInfo> constructor(
i::FunctionTemplateInfo::cast(internal_template->constructor()));
CHECK(!constructor->access_check_info()->IsUndefined());
- v8::Persistent<Context> context0(Context::New(NULL, global_template));
+ v8::Local<Context> context0(Context::New(isolate, NULL, global_template));
CHECK(!context0.IsEmpty());
CHECK(!constructor->access_check_info()->IsUndefined());
}
THREADED_TEST(TurnOnAccessCheck) {
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope handle_scope(isolate);
// Create an environment with access check to the global object disabled by
// default.
@@ -12652,7 +12713,7 @@
IndexedGetAccessBlocker,
v8::Handle<v8::Value>(),
false);
- v8::Persistent<Context> context = Context::New(NULL, global_template);
+ v8::Local<Context> context = Context::New(isolate, NULL, global_template);
Context::Scope context_scope(context);
// Set up a property and a number of functions.
@@ -12723,7 +12784,8 @@
THREADED_TEST(TurnOnAccessCheckAndRecompile) {
- v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope handle_scope(isolate);
// Create an environment with access check to the global object disabled by
// default. When the registered access checker will block access to properties
@@ -12733,7 +12795,7 @@
IndexedGetAccessBlocker,
v8::Handle<v8::Value>(),
false);
- v8::Persistent<Context> context = Context::New(NULL, global_template);
+ v8::Local<Context> context = Context::New(isolate, NULL, global_template);
Context::Scope context_scope(context);
// Set up a property and a number of functions.
@@ -12992,9 +13054,10 @@
// Test that cross-context new calls use the context of the callee to
// create the new JavaScript object.
THREADED_TEST(CrossContextNew) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
- v8::Persistent<Context> context0 = Context::New();
- v8::Persistent<Context> context1 = Context::New();
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope scope(isolate);
+ v8::Local<Context> context0 = Context::New(isolate);
+ v8::Local<Context> context1 = Context::New(isolate);
// Allow cross-domain access.
Local<String> token = v8_str("<security token>");
@@ -13015,10 +13078,6 @@
CHECK(value->IsInt32());
CHECK_EQ(42, value->Int32Value());
context1->Exit();
-
- // Dispose the contexts to allow them to be garbage collected.
- context0.Dispose(context0->GetIsolate());
- context1.Dispose(context1->GetIsolate());
}
@@ -13799,9 +13858,10 @@
TEST(InlinedFunctionAcrossContexts) {
i::FLAG_allow_natives_syntax = true;
- v8::HandleScope outer_scope(v8::Isolate::GetCurrent());
- v8::Persistent<v8::Context> ctx1 = v8::Context::New();
- v8::Persistent<v8::Context> ctx2 = v8::Context::New();
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope outer_scope(isolate);
+ v8::Local<v8::Context> ctx1 = v8::Context::New(isolate);
+ v8::Local<v8::Context> ctx2 = v8::Context::New(isolate);
ctx1->Enter();
{
@@ -13831,15 +13891,13 @@
"ReferenceError: G is not defined");
ctx2->Exit();
ctx1->Exit();
- ctx1.Dispose(ctx1->GetIsolate());
}
- ctx2.Dispose(ctx2->GetIsolate());
}
-v8::Persistent<Context> calling_context0;
-v8::Persistent<Context> calling_context1;
-v8::Persistent<Context> calling_context2;
+static v8::Local<Context> calling_context0;
+static v8::Local<Context> calling_context1;
+static v8::Local<Context> calling_context2;
// Check that the call to the callback is initiated in
@@ -13856,11 +13914,15 @@
THREADED_TEST(GetCallingContext) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope scope(isolate);
- calling_context0 = Context::New();
- calling_context1 = Context::New();
- calling_context2 = Context::New();
+ Local<Context> calling_context0(Context::New(isolate));
+ Local<Context> calling_context1(Context::New(isolate));
+ Local<Context> calling_context2(Context::New(isolate));
+ ::calling_context0 = calling_context0;
+ ::calling_context1 = calling_context1;
+ ::calling_context2 = calling_context2;
// Allow cross-domain access.
Local<String> token = v8_str("<security token>");
@@ -13891,14 +13953,9 @@
calling_context1->Global());
CompileRun("context1.f()");
calling_context2->Exit();
-
- // Dispose the contexts to allow them to be garbage collected.
- calling_context0.Dispose(calling_context0->GetIsolate());
- calling_context1.Dispose(calling_context1->GetIsolate());
- calling_context2.Dispose(calling_context2->GetIsolate());
- calling_context0.Clear();
- calling_context1.Clear();
- calling_context2.Clear();
+ ::calling_context0.Clear();
+ ::calling_context1.Clear();
+ ::calling_context2.Clear();
}
@@ -15099,6 +15156,7 @@
void TypedArrayTestHelper(v8::ExternalArrayType array_type,
int64_t low, int64_t high) {
const int kElementCount = 50;
+ i::FLAG_harmony_array_buffer = true;
i::FLAG_harmony_typed_arrays = true;
LocalContext env;
@@ -15175,6 +15233,40 @@
}
+THREADED_TEST(Uint8ClampedArray) {
+ TypedArrayTestHelper<uint8_t, v8::Uint8ClampedArray, i::ExternalPixelArray>(
+ v8::kExternalPixelArray, 0, 0xFF);
+}
+
+
+#define IS_TYPED_ARRAY_TEST(TypedArray) \
+ THREADED_TEST(Is##TypedArray) { \
+ i::FLAG_harmony_array_buffer = true; \
+ i::FLAG_harmony_typed_arrays = true; \
+ LocalContext env; \
+ v8::Isolate* isolate = env->GetIsolate(); \
+ v8::HandleScope handle_scope(isolate); \
+ \
+ Handle<Value> result = CompileRun( \
+ "var ab = new ArrayBuffer(128);" \
+ "new " #TypedArray "(ab)"); \
+ CHECK(result->Is##TypedArray()); \
+ }
+
+IS_TYPED_ARRAY_TEST(Uint8Array)
+IS_TYPED_ARRAY_TEST(Int8Array)
+IS_TYPED_ARRAY_TEST(Uint16Array)
+IS_TYPED_ARRAY_TEST(Int16Array)
+IS_TYPED_ARRAY_TEST(Uint32Array)
+IS_TYPED_ARRAY_TEST(Int32Array)
+IS_TYPED_ARRAY_TEST(Float32Array)
+IS_TYPED_ARRAY_TEST(Float64Array)
+IS_TYPED_ARRAY_TEST(Uint8ClampedArray)
+
+#undef IS_TYPED_ARRAY_TEST
+
+
+
THREADED_TEST(ScriptContextDependence) {
LocalContext c1;
v8::HandleScope scope(c1->GetIsolate());
@@ -15708,17 +15800,20 @@
const int kShortIdlePauseInMs = 100;
const int kLongIdlePauseInMs = 1000;
LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(env->GetIsolate());
intptr_t initial_size = HEAP->SizeOfObjects();
// Send idle notification to start a round of incremental GCs.
v8::V8::IdleNotification(kShortIdlePauseInMs);
// Emulate 7 page reloads.
for (int i = 0; i < 7; i++) {
- v8::Persistent<v8::Context> ctx = v8::Context::New();
- ctx->Enter();
- CreateGarbageInOldSpace();
- ctx->Exit();
- ctx.Dispose(ctx->GetIsolate());
+ {
+ v8::HandleScope inner_scope(env->GetIsolate());
+ v8::Local<v8::Context> ctx = v8::Context::New(isolate);
+ ctx->Enter();
+ CreateGarbageInOldSpace();
+ ctx->Exit();
+ }
v8::V8::ContextDisposedNotification();
v8::V8::IdleNotification(kLongIdlePauseInMs);
}
@@ -16047,22 +16142,21 @@
TEST(Regress528) {
v8::V8::Initialize();
-
- v8::HandleScope scope(v8::Isolate::GetCurrent());
- v8::Persistent<Context> context;
- v8::Persistent<Context> other_context;
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope scope(isolate);
+ v8::Local<Context> other_context;
int gc_count;
// Create a context used to keep the code from aging in the compilation
// cache.
- other_context = Context::New();
+ other_context = Context::New(isolate);
// Context-dependent context data creates reference from the compilation
// cache to the global object.
const char* source_simple = "1";
- context = Context::New();
{
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(isolate);
+ v8::Local<Context> context = Context::New(isolate);
context->Enter();
Local<v8::String> obj = v8::String::New("");
@@ -16070,7 +16164,6 @@
CompileRun(source_simple);
context->Exit();
}
- context.Dispose(context->GetIsolate());
v8::V8::ContextDisposedNotification();
for (gc_count = 1; gc_count < 10; gc_count++) {
other_context->Enter();
@@ -16085,15 +16178,14 @@
// Eval in a function creates reference from the compilation cache to the
// global object.
const char* source_eval = "function f(){eval('1')}; f()";
- context = Context::New();
{
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(isolate);
+ v8::Local<Context> context = Context::New(isolate);
context->Enter();
CompileRun(source_eval);
context->Exit();
}
- context.Dispose(context->GetIsolate());
v8::V8::ContextDisposedNotification();
for (gc_count = 1; gc_count < 10; gc_count++) {
other_context->Enter();
@@ -16108,9 +16200,9 @@
// Looking up the line number for an exception creates reference from the
// compilation cache to the global object.
const char* source_exception = "function f(){throw 1;} f()";
- context = Context::New();
{
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(isolate);
+ v8::Local<Context> context = Context::New(isolate);
context->Enter();
v8::TryCatch try_catch;
@@ -16121,7 +16213,6 @@
CHECK_EQ(1, message->GetLineNumber());
context->Exit();
}
- context.Dispose(context->GetIsolate());
v8::V8::ContextDisposedNotification();
for (gc_count = 1; gc_count < 10; gc_count++) {
other_context->Enter();
@@ -16133,7 +16224,6 @@
CHECK_GE(2, gc_count);
CHECK_EQ(1, GetGlobalObjectsCount());
- other_context.Dispose(other_context->GetIsolate());
v8::V8::ContextDisposedNotification();
}
@@ -16850,11 +16940,15 @@
// Run isolate 1.
v8::Isolate* isolate1 = v8::Isolate::New();
isolate1->Enter();
- v8::Persistent<v8::Context> context1 = v8::Context::New();
+ v8::Persistent<v8::Context> context1;
+ {
+ v8::HandleScope scope(isolate1);
+ context1.Reset(isolate1, Context::New(isolate1));
+ }
{
- v8::Context::Scope cscope(context1);
v8::HandleScope scope(isolate1);
+ v8::Context::Scope cscope(isolate1, context1);
// Run something in new isolate.
CompileRun("var foo = 'isolate 1';");
ExpectString("function f() { return foo; }; f()", "isolate 1");
@@ -16866,9 +16960,9 @@
{
v8::Isolate::Scope iscope(isolate2);
- context2 = v8::Context::New();
- v8::Context::Scope cscope(context2);
v8::HandleScope scope(isolate2);
+ context2.Reset(isolate2, Context::New(isolate2));
+ v8::Context::Scope cscope(isolate2, context2);
// Run something in new isolate.
CompileRun("var foo = 'isolate 2';");
@@ -16876,8 +16970,8 @@
}
{
- v8::Context::Scope cscope(context1);
v8::HandleScope scope(isolate1);
+ v8::Context::Scope cscope(isolate1, context1);
// Now again in isolate 1
ExpectString("function f() { return foo; }; f()", "isolate 1");
}
@@ -16885,11 +16979,17 @@
isolate1->Exit();
// Run some stuff in default isolate.
- v8::Persistent<v8::Context> context_default = v8::Context::New();
+ v8::Persistent<v8::Context> context_default;
+ {
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate::Scope iscope(isolate);
+ v8::HandleScope scope(isolate);
+ context_default.Reset(isolate, Context::New(isolate));
+ }
{
- v8::Context::Scope cscope(context_default);
v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::Context::Scope cscope(v8::Isolate::GetCurrent(), context_default);
// Variables in other isolates should be not available, verify there
// is an exception.
ExpectTrue("function f() {"
@@ -16908,14 +17008,14 @@
{
v8::Isolate::Scope iscope(isolate2);
- v8::Context::Scope cscope(context2);
v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::Context::Scope cscope(isolate2, context2);
ExpectString("function f() { return foo; }; f()", "isolate 2");
}
{
- v8::Context::Scope cscope(context1);
v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::Context::Scope cscope(v8::Isolate::GetCurrent(), context1);
ExpectString("function f() { return foo; }; f()", "isolate 1");
}
@@ -16940,8 +17040,8 @@
// Check that default isolate still runs.
{
- v8::Context::Scope cscope(context_default);
v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::Context::Scope cscope(v8::Isolate::GetCurrent(), context_default);
ExpectTrue("function f() { return isDefaultIsolate; }; f()");
}
}
@@ -17011,11 +17111,11 @@
TEST(IsolateDifferentContexts) {
v8::Isolate* isolate = v8::Isolate::New();
- Persistent<v8::Context> context;
+ Local<v8::Context> context;
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
- context = v8::Context::New();
+ context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
Local<Value> v = CompileRun("2");
CHECK(v->IsNumber());
@@ -17024,13 +17124,12 @@
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
- context = v8::Context::New();
+ context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
Local<Value> v = CompileRun("22");
CHECK(v->IsNumber());
CHECK_EQ(22, static_cast<int>(v->NumberValue()));
}
- isolate->Dispose();
}
class InitDefaultIsolateThread : public v8::internal::Thread {
@@ -17300,7 +17399,7 @@
v8::Persistent<v8::Object> visited =
v8::Persistent<v8::Object>::Cast(value);
CHECK_EQ(42, visited.WrapperClassId(v8::Isolate::GetCurrent()));
- CHECK_EQ(object_, visited);
+ CHECK_EQ(Handle<Value>(*object_), Handle<Value>(*visited));
++counter_;
}
}
@@ -17515,11 +17614,11 @@
THREADED_TEST(CreationContext) {
HandleScope handle_scope(v8::Isolate::GetCurrent());
- Persistent<Context> context1 = Context::New();
+ Handle<Context> context1 = Context::New(v8::Isolate::GetCurrent());
InstallContextId(context1, 1);
- Persistent<Context> context2 = Context::New();
+ Handle<Context> context2 = Context::New(v8::Isolate::GetCurrent());
InstallContextId(context2, 2);
- Persistent<Context> context3 = Context::New();
+ Handle<Context> context3 = Context::New(v8::Isolate::GetCurrent());
InstallContextId(context3, 3);
Local<v8::FunctionTemplate> tmpl = v8::FunctionTemplate::New();
@@ -17593,16 +17692,12 @@
CHECK(instance2->CreationContext() == context2);
CheckContextId(instance2, 2);
}
-
- context1.Dispose(context1->GetIsolate());
- context2.Dispose(context2->GetIsolate());
- context3.Dispose(context3->GetIsolate());
}
THREADED_TEST(CreationContextOfJsFunction) {
HandleScope handle_scope(v8::Isolate::GetCurrent());
- Persistent<Context> context = Context::New();
+ Handle<Context> context = Context::New(v8::Isolate::GetCurrent());
InstallContextId(context, 1);
Local<Object> function;
@@ -17613,8 +17708,6 @@
CHECK(function->CreationContext() == context);
CheckContextId(function, 1);
-
- context.Dispose(context->GetIsolate());
}
@@ -17925,7 +18018,8 @@
THREADED_TEST(Regress93759) {
- HandleScope scope(v8::Isolate::GetCurrent());
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ HandleScope scope(isolate);
// Template for object with security check.
Local<ObjectTemplate> no_proto_template = v8::ObjectTemplate::New();
@@ -17946,7 +18040,7 @@
protected_hidden_proto_template->SetHiddenPrototype(true);
// Context for "foreign" objects used in test.
- Persistent<Context> context = v8::Context::New();
+ Local<Context> context = v8::Context::New(isolate);
context->Enter();
// Plain object, no security check.
@@ -18010,8 +18104,6 @@
Local<Value> result6 = CompileRun("Object.getPrototypeOf(phidden)");
CHECK(result6->Equals(Undefined()));
-
- context.Dispose(context->GetIsolate());
}
@@ -18050,14 +18142,15 @@
THREADED_TEST(ForeignFunctionReceiver) {
- HandleScope scope(v8::Isolate::GetCurrent());
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ HandleScope scope(isolate);
// Create two contexts with different "id" properties ('i' and 'o').
// Call a function both from its own context and from a the foreign
// context, and see what "this" is bound to (returning both "this"
// and "this.id" for comparison).
- Persistent<Context> foreign_context = v8::Context::New();
+ Local<Context> foreign_context = v8::Context::New(isolate);
foreign_context->Enter();
Local<Value> foreign_function =
CompileRun("function func() { return { 0: this.id, "
@@ -18138,8 +18231,6 @@
TestReceiver(o, context->Global(), "func()");
// Calling with no base.
TestReceiver(o, context->Global(), "(1,func)()");
-
- foreign_context.Dispose(foreign_context->GetIsolate());
}
diff --git a/test/cctest/test-assembler-arm.cc b/test/cctest/test-assembler-arm.cc
index 58ce5ec..8cce084 100644
--- a/test/cctest/test-assembler-arm.cc
+++ b/test/cctest/test-assembler-arm.cc
@@ -25,6 +25,10 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+
#include "v8.h"
#include "disassembler.h"
diff --git a/test/cctest/test-assembler-ia32.cc b/test/cctest/test-assembler-ia32.cc
index 8d39ee7..7c8e70c 100644
--- a/test/cctest/test-assembler-ia32.cc
+++ b/test/cctest/test-assembler-ia32.cc
@@ -27,6 +27,10 @@
#include <stdlib.h>
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+
#include "v8.h"
#include "disassembler.h"
diff --git a/test/cctest/test-assembler-x64.cc b/test/cctest/test-assembler-x64.cc
index 669475a..a989fbb 100644
--- a/test/cctest/test-assembler-x64.cc
+++ b/test/cctest/test-assembler-x64.cc
@@ -27,6 +27,10 @@
#include <stdlib.h>
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+
#include "v8.h"
#include "macro-assembler.h"
diff --git a/test/cctest/test-compiler.cc b/test/cctest/test-compiler.cc
index f200435..cff0f82 100644
--- a/test/cctest/test-compiler.cc
+++ b/test/cctest/test-compiler.cc
@@ -28,6 +28,10 @@
#include <stdlib.h>
#include <wchar.h>
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+
#include "v8.h"
#include "compiler.h"
diff --git a/test/cctest/test-cpu-profiler.cc b/test/cctest/test-cpu-profiler.cc
index f5f5d65..d73be18 100644
--- a/test/cctest/test-cpu-profiler.cc
+++ b/test/cctest/test-cpu-profiler.cc
@@ -27,6 +27,10 @@
//
// Tests of profiles generator and utilities.
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+
#include "v8.h"
#include "cpu-profiler-inl.h"
#include "cctest.h"
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index 3cc35e7..1afe890 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -29,6 +29,10 @@
#include <stdlib.h>
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+
#include "v8.h"
#include "api.h"
@@ -132,12 +136,16 @@
v8::Handle<v8::ObjectTemplate> global_template =
v8::Handle<v8::ObjectTemplate>(),
v8::Handle<v8::Value> global_object = v8::Handle<v8::Value>())
- : context_(v8::Context::New(extensions, global_template, global_object)) {
+ : scope_(v8::Isolate::GetCurrent()),
+ context_(
+ v8::Context::New(v8::Isolate::GetCurrent(),
+ extensions,
+ global_template,
+ global_object)) {
context_->Enter();
}
inline ~DebugLocalContext() {
context_->Exit();
- context_.Dispose(context_->GetIsolate());
}
inline v8::Context* operator->() { return *context_; }
inline v8::Context* operator*() { return *context_; }
@@ -162,7 +170,8 @@
}
private:
- v8::Persistent<v8::Context> context_;
+ v8::HandleScope scope_;
+ v8::Local<v8::Context> context_;
};
@@ -675,7 +684,7 @@
} else {
CHECK(result->IsString());
v8::Handle<v8::String> function_name(result->ToString());
- function_name->WriteAscii(last_function_hit);
+ function_name->WriteUtf8(last_function_hit);
}
}
@@ -710,7 +719,7 @@
} else {
CHECK(result->IsString());
v8::Handle<v8::String> script_name(result->ToString());
- script_name->WriteAscii(last_script_name_hit);
+ script_name->WriteUtf8(last_script_name_hit);
}
}
@@ -726,7 +735,7 @@
result = result->ToString();
CHECK(result->IsString());
v8::Handle<v8::String> script_data(result->ToString());
- script_data->WriteAscii(last_script_data_hit);
+ script_data->WriteUtf8(last_script_data_hit);
}
}
@@ -746,7 +755,7 @@
result = result->ToString();
CHECK(result->IsString());
v8::Handle<v8::String> script_data(result->ToString());
- script_data->WriteAscii(last_script_data_hit);
+ script_data->WriteUtf8(last_script_data_hit);
}
}
}
@@ -4230,7 +4239,8 @@
// http://crbug.com/28933
// Test that debug break is disabled when bootstrapper is active.
TEST(NoBreakWhenBootstrapping) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope scope(isolate);
// Register a debug event listener which sets the break flag and counts.
v8::Debug::SetDebugEventListener(DebugEventCounter);
@@ -4245,8 +4255,8 @@
kSimpleExtensionSource));
const char* extension_names[] = { "simpletest" };
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Persistent<v8::Context> context = v8::Context::New(&extensions);
- context.Dispose(context->GetIsolate());
+ v8::HandleScope handle_scope(isolate);
+ v8::Context::New(isolate, &extensions);
}
// Check that no DebugBreak events occured during the context creation.
CHECK_EQ(0, break_point_hit_count);
@@ -5153,7 +5163,9 @@
v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
global_template->Set(v8::String::New("ThreadedAtBarrier1"),
v8::FunctionTemplate::New(ThreadedAtBarrier1));
- v8::Handle<v8::Context> context = v8::Context::New(NULL, global_template);
+ v8::Handle<v8::Context> context = v8::Context::New(v8::Isolate::GetCurrent(),
+ NULL,
+ global_template);
v8::Context::Scope context_scope(context);
CompileRun(source);
@@ -5529,7 +5541,9 @@
v8::FunctionTemplate::New(CheckDataParameter));
global_template->Set(v8::String::New("CheckClosure"),
v8::FunctionTemplate::New(CheckClosure));
- v8::Handle<v8::Context> context = v8::Context::New(NULL, global_template);
+ v8::Handle<v8::Context> context = v8::Context::New(v8::Isolate::GetCurrent(),
+ NULL,
+ global_template);
v8::Context::Scope context_scope(context);
// Compile a function for checking the number of JavaScript frames.
@@ -6227,7 +6241,7 @@
}
-static v8::Persistent<v8::Context> expected_context;
+static v8::Handle<v8::Context> expected_context;
static v8::Handle<v8::Value> expected_context_data;
@@ -6253,18 +6267,19 @@
// Checks that this data is set correctly and that when the debug message
// handler is called the expected context is the one active.
TEST(ContextData) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope scope(isolate);
v8::Debug::SetMessageHandler2(ContextCheckMessageHandler);
// Create two contexts.
- v8::Persistent<v8::Context> context_1;
- v8::Persistent<v8::Context> context_2;
+ v8::Handle<v8::Context> context_1;
+ v8::Handle<v8::Context> context_2;
v8::Handle<v8::ObjectTemplate> global_template =
v8::Handle<v8::ObjectTemplate>();
v8::Handle<v8::Value> global_object = v8::Handle<v8::Value>();
- context_1 = v8::Context::New(NULL, global_template, global_object);
- context_2 = v8::Context::New(NULL, global_template, global_object);
+ context_1 = v8::Context::New(isolate, NULL, global_template, global_object);
+ context_2 = v8::Context::New(isolate, NULL, global_template, global_object);
// Default data value is undefined.
CHECK(context_1->GetEmbedderData(0)->IsUndefined());
@@ -6376,7 +6391,7 @@
} else {
CHECK(result->IsString());
v8::Handle<v8::String> function_name(result->ToString());
- function_name->WriteAscii(last_function_hit);
+ function_name->WriteUtf8(last_function_hit);
}
}
@@ -6425,10 +6440,11 @@
// Common part of EvalContextData and NestedBreakEventContextData tests.
static void ExecuteScriptForContextCheck() {
// Create a context.
- v8::Persistent<v8::Context> context_1;
+ v8::Handle<v8::Context> context_1;
v8::Handle<v8::ObjectTemplate> global_template =
v8::Handle<v8::ObjectTemplate>();
- context_1 = v8::Context::New(NULL, global_template);
+ context_1 =
+ v8::Context::New(v8::Isolate::GetCurrent(), NULL, global_template);
// Default data value is undefined.
CHECK(context_1->GetEmbedderData(0)->IsUndefined());
@@ -6444,7 +6460,7 @@
// Enter and run function in the context.
{
v8::Context::Scope context_scope(context_1);
- expected_context = context_1;
+ expected_context = v8::Local<v8::Context>(*context_1);
expected_context_data = data_1;
v8::Local<v8::Function> f = CompileFunction(source, "f");
f->Call(context_1->Global(), 0, NULL);
@@ -6605,22 +6621,26 @@
script_collected_message_count = 0;
v8::HandleScope scope(isolate);
- { // Scope for the DebugLocalContext.
- DebugLocalContext env;
-
- // Request the loaded scripts to initialize the debugger script cache.
- debug->GetLoadedScripts();
-
- // Do garbage collection to ensure that only the script in this test will be
- // collected afterwards.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
-
- v8::Debug::SetMessageHandler2(ScriptCollectedMessageHandler);
- {
- v8::Script::Compile(v8::String::New("eval('a=1')"))->Run();
- v8::Script::Compile(v8::String::New("eval('a=2')"))->Run();
- }
+ v8::Persistent<v8::Context> context;
+ {
+ v8::HandleScope scope(isolate);
+ context.Reset(isolate, v8::Context::New(isolate));
}
+ context->Enter();
+
+ // Request the loaded scripts to initialize the debugger script cache.
+ debug->GetLoadedScripts();
+
+ // Do garbage collection to ensure that only the script in this test will be
+ // collected afterwards.
+ HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+
+ v8::Debug::SetMessageHandler2(ScriptCollectedMessageHandler);
+ v8::Script::Compile(v8::String::New("eval('a=1')"))->Run();
+ v8::Script::Compile(v8::String::New("eval('a=2')"))->Run();
+
+ context->Exit();
+ context.Dispose(isolate);
// Do garbage collection to collect the script above which is no longer
// referenced.
@@ -7079,14 +7099,14 @@
// Check that event details contain context where debug event occured.
TEST(DebugEventContext) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope scope(isolate);
expected_callback_data = v8::Int32::New(2010);
v8::Debug::SetDebugEventListener2(DebugEventContextChecker,
expected_callback_data);
- expected_context = v8::Context::New();
+ expected_context = v8::Context::New(isolate);
v8::Context::Scope context_scope(expected_context);
v8::Script::Compile(v8::String::New("(function(){debugger;})();"))->Run();
- expected_context.Dispose(expected_context->GetIsolate());
expected_context.Clear();
v8::Debug::SetDebugEventListener(NULL);
expected_context_data = v8::Handle<v8::Value>();
@@ -7176,7 +7196,7 @@
char fn[80];
CHECK(result->IsString());
v8::Handle<v8::String> function_name(result->ToString());
- function_name->WriteAscii(fn);
+ function_name->WriteUtf8(fn);
if (strcmp(fn, "bar") == 0) {
i::Deoptimizer::DeoptimizeAll(v8::internal::Isolate::Current());
debug_event_break_deoptimize_done = true;
diff --git a/test/cctest/test-declarative-accessors.cc b/test/cctest/test-declarative-accessors.cc
index 569848c..b09a29d 100644
--- a/test/cctest/test-declarative-accessors.cc
+++ b/test/cctest/test-declarative-accessors.cc
@@ -27,6 +27,9 @@
#include <stdlib.h>
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+
#include "v8.h"
#include "cctest.h"
diff --git a/test/cctest/test-decls.cc b/test/cctest/test-decls.cc
index ed8da5c..6be5303 100644
--- a/test/cctest/test-decls.cc
+++ b/test/cctest/test-decls.cc
@@ -27,6 +27,11 @@
#include <stdlib.h>
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+
#include "v8.h"
#include "heap.h"
@@ -113,7 +118,8 @@
void DeclarationContext::InitializeIfNeeded() {
if (is_initialized_) return;
- HandleScope scope(Isolate::GetCurrent());
+ Isolate* isolate = Isolate::GetCurrent();
+ HandleScope scope(isolate);
Local<FunctionTemplate> function = FunctionTemplate::New();
Local<Value> data = External::New(this);
GetHolder(function)->SetNamedPropertyHandler(&HandleGet,
@@ -121,10 +127,14 @@
&HandleQuery,
0, 0,
data);
- context_ = Context::New(0, function->InstanceTemplate(), Local<Value>());
+ context_.Reset(isolate,
+ Context::New(isolate,
+ 0,
+ function->InstanceTemplate(),
+ Local<Value>()));
context_->Enter();
is_initialized_ = true;
- PostInitializeContext(context_);
+ PostInitializeContext(Local<Context>::New(isolate, context_));
}
@@ -694,14 +704,14 @@
class SimpleContext {
public:
- SimpleContext() {
- context_ = Context::New();
+ SimpleContext()
+ : handle_scope_(Isolate::GetCurrent()),
+ context_(Context::New(Isolate::GetCurrent())) {
context_->Enter();
}
- virtual ~SimpleContext() {
+ ~SimpleContext() {
context_->Exit();
- context_.Dispose(context_->GetIsolate());
}
void Check(const char* source,
@@ -732,7 +742,8 @@
}
private:
- Persistent<Context> context_;
+ HandleScope handle_scope_;
+ Local<Context> context_;
};
diff --git a/test/cctest/test-disasm-arm.cc b/test/cctest/test-disasm-arm.cc
index 9f12232..8cba75b 100644
--- a/test/cctest/test-disasm-arm.cc
+++ b/test/cctest/test-disasm-arm.cc
@@ -28,6 +28,10 @@
#include <stdlib.h>
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+
#include "v8.h"
#include "debug.h"
diff --git a/test/cctest/test-disasm-ia32.cc b/test/cctest/test-disasm-ia32.cc
index ca81a5a..f81b173 100644
--- a/test/cctest/test-disasm-ia32.cc
+++ b/test/cctest/test-disasm-ia32.cc
@@ -27,6 +27,10 @@
#include <stdlib.h>
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+
#include "v8.h"
#include "debug.h"
diff --git a/test/cctest/test-func-name-inference.cc b/test/cctest/test-func-name-inference.cc
index 5b8293f..5ebc679 100644
--- a/test/cctest/test-func-name-inference.cc
+++ b/test/cctest/test-func-name-inference.cc
@@ -25,6 +25,12 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+
#include "v8.h"
#include "api.h"
diff --git a/test/cctest/test-global-object.cc b/test/cctest/test-global-object.cc
index b124b27..e8ffb8b 100644
--- a/test/cctest/test-global-object.cc
+++ b/test/cctest/test-global-object.cc
@@ -36,7 +36,7 @@
TEST(StrictUndeclaredGlobalVariable) {
HandleScope scope(Isolate::GetCurrent());
v8::Local<v8::String> var_name = v8_str("x");
- LocalContext context;
+ v8::LocalContext context;
v8::TryCatch try_catch;
v8::Local<v8::Script> script = v8_compile("\"use strict\"; x = 42;");
v8::Handle<v8::Object> proto = v8::Object::New();
diff --git a/test/cctest/test-hashing.cc b/test/cctest/test-hashing.cc
index 605b59b..1547613 100644
--- a/test/cctest/test-hashing.cc
+++ b/test/cctest/test-hashing.cc
@@ -27,6 +27,9 @@
#include <stdlib.h>
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+
#include "v8.h"
#include "factory.h"
@@ -44,8 +47,6 @@
typedef uint32_t (*HASH_FUNCTION)();
-static v8::Persistent<v8::Context> env;
-
#define __ masm->
@@ -232,7 +233,10 @@
TEST(StringHash) {
- if (env.IsEmpty()) env = v8::Context::New();
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope handle_scope(isolate);
+ v8::Context::Scope context_scope(v8::Context::New(isolate));
+
for (uint8_t a = 0; a < String::kMaxOneByteCharCode; a++) {
// Numbers are hashed differently.
if (a >= '0' && a <= '9') continue;
@@ -250,7 +254,9 @@
TEST(NumberHash) {
- if (env.IsEmpty()) env = v8::Context::New();
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope handle_scope(isolate);
+ v8::Context::Scope context_scope(v8::Context::New(isolate));
// Some specific numbers
for (uint32_t key = 0; key < 42; key += 7) {
diff --git a/test/cctest/test-heap-profiler.cc b/test/cctest/test-heap-profiler.cc
index a4680e4..b2c9b72 100644
--- a/test/cctest/test-heap-profiler.cc
+++ b/test/cctest/test-heap-profiler.cc
@@ -29,6 +29,9 @@
#include <ctype.h>
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+
#include "v8.h"
#include "cctest.h"
@@ -1196,7 +1199,8 @@
objects_[i] =
v8::Persistent<v8::Object>::New(isolate_, v8::Object::New());
}
- (*env)->Global()->Set(v8_str("root_object"), objects_[0]);
+ (*env)->Global()->Set(v8_str("root_object"),
+ v8::Local<v8::Value>::New(isolate_, objects_[0]));
}
~GraphWithImplicitRefs() {
instance_ = NULL;
@@ -1410,7 +1414,7 @@
GetProperty(obj, v8::HeapGraphEdge::kProperty, "n_prop");
v8::Local<v8::Number> js_n_prop =
js_obj->Get(v8_str("n_prop")).As<v8::Number>();
- CHECK(js_n_prop == n_prop->GetHeapValue());
+ CHECK(js_n_prop->NumberValue() == n_prop->GetHeapValue()->NumberValue());
}
@@ -1582,9 +1586,9 @@
static void PersistentHandleCallback(v8::Isolate* isolate,
- v8::Persistent<v8::Value> handle,
+ v8::Persistent<v8::Value>* handle,
void*) {
- handle.Dispose(isolate);
+ handle->Dispose(isolate);
}
@@ -1596,7 +1600,9 @@
v8::Persistent<v8::Object> handle =
v8::Persistent<v8::Object>::New(env->GetIsolate(), v8::Object::New());
- handle.MakeWeak(env->GetIsolate(), NULL, PersistentHandleCallback);
+ handle.MakeWeak<v8::Value, void>(env->GetIsolate(),
+ NULL,
+ PersistentHandleCallback);
CHECK(HasWeakGlobalHandle());
}
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index c0bd08a..0711454 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -27,6 +27,10 @@
#include <stdlib.h>
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+
#include "v8.h"
#include "compilation-cache.h"
@@ -392,10 +396,10 @@
static bool WeakPointerCleared = false;
static void TestWeakGlobalHandleCallback(v8::Isolate* isolate,
- v8::Persistent<v8::Value> handle,
+ v8::Persistent<v8::Value>* handle,
void* id) {
if (1234 == reinterpret_cast<intptr_t>(id)) WeakPointerCleared = true;
- handle.Dispose(isolate);
+ handle->Dispose(isolate);
}
@@ -423,8 +427,8 @@
global_handles->MakeWeak(h2.location(),
reinterpret_cast<void*>(1234),
- NULL,
- &TestWeakGlobalHandleCallback);
+ &TestWeakGlobalHandleCallback,
+ NULL);
// Scavenge treats weak pointers as normal roots.
heap->PerformScavenge();
@@ -470,8 +474,8 @@
global_handles->MakeWeak(h2.location(),
reinterpret_cast<void*>(1234),
- NULL,
- &TestWeakGlobalHandleCallback);
+ &TestWeakGlobalHandleCallback,
+ NULL);
CHECK(!GlobalHandles::IsNearDeath(h1.location()));
CHECK(!GlobalHandles::IsNearDeath(h2.location()));
@@ -507,8 +511,8 @@
global_handles->MakeWeak(h.location(),
reinterpret_cast<void*>(1234),
- NULL,
- &TestWeakGlobalHandleCallback);
+ &TestWeakGlobalHandleCallback,
+ NULL);
// Scanvenge does not recognize weak reference.
heap->PerformScavenge();
@@ -1293,13 +1297,13 @@
Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
HandleScope scope(isolate);
- v8::Persistent<v8::Context> ctx[kNumTestContexts];
+ v8::Handle<v8::Context> ctx[kNumTestContexts];
CHECK_EQ(0, CountNativeContexts());
// Create a number of global contests which gets linked together.
for (int i = 0; i < kNumTestContexts; i++) {
- ctx[i] = v8::Context::New();
+ ctx[i] = v8::Context::New(v8::Isolate::GetCurrent());
bool opt = (FLAG_always_opt && i::V8::UseCrankshaft());
@@ -1366,7 +1370,9 @@
// Dispose the native contexts one by one.
for (int i = 0; i < kNumTestContexts; i++) {
- ctx[i].Dispose(ctx[i]->GetIsolate());
+ // TODO(dcarney): is there a better way to do this?
+ i::Object** unsafe = reinterpret_cast<i::Object**>(*ctx[i]);
+ *unsafe = HEAP->undefined_value();
ctx[i].Clear();
// Scavenge treats these references as strong.
@@ -1430,14 +1436,14 @@
static const int kNumTestContexts = 10;
HandleScope scope(isolate);
- v8::Persistent<v8::Context> ctx[kNumTestContexts];
+ v8::Handle<v8::Context> ctx[kNumTestContexts];
CHECK_EQ(0, CountNativeContexts());
// Create an number of contexts and check the length of the weak list both
// with and without GCs while iterating the list.
for (int i = 0; i < kNumTestContexts; i++) {
- ctx[i] = v8::Context::New();
+ ctx[i] = v8::Context::New(v8::Isolate::GetCurrent());
CHECK_EQ(i + 1, CountNativeContexts());
CHECK_EQ(i + 1, CountNativeContextsWithGC(isolate, i / 2 + 1));
}
@@ -1655,9 +1661,15 @@
// optimized code.
TEST(LeakNativeContextViaMap) {
i::FLAG_allow_natives_syntax = true;
- v8::HandleScope outer_scope(v8::Isolate::GetCurrent());
- v8::Persistent<v8::Context> ctx1 = v8::Context::New();
- v8::Persistent<v8::Context> ctx2 = v8::Context::New();
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope outer_scope(isolate);
+ v8::Persistent<v8::Context> ctx1;
+ v8::Persistent<v8::Context> ctx2;
+ {
+ v8::HandleScope scope(isolate);
+ ctx1.Reset(isolate, v8::Context::New(isolate));
+ ctx2.Reset(isolate, v8::Context::New(isolate));
+ }
ctx1->Enter();
HEAP->CollectAllAvailableGarbage();
@@ -1693,9 +1705,15 @@
// optimized code.
TEST(LeakNativeContextViaFunction) {
i::FLAG_allow_natives_syntax = true;
- v8::HandleScope outer_scope(v8::Isolate::GetCurrent());
- v8::Persistent<v8::Context> ctx1 = v8::Context::New();
- v8::Persistent<v8::Context> ctx2 = v8::Context::New();
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope outer_scope(isolate);
+ v8::Persistent<v8::Context> ctx1;
+ v8::Persistent<v8::Context> ctx2;
+ {
+ v8::HandleScope scope(isolate);
+ ctx1.Reset(isolate, v8::Context::New(isolate));
+ ctx2.Reset(isolate, v8::Context::New(isolate));
+ }
ctx1->Enter();
HEAP->CollectAllAvailableGarbage();
@@ -1729,9 +1747,15 @@
TEST(LeakNativeContextViaMapKeyed) {
i::FLAG_allow_natives_syntax = true;
- v8::HandleScope outer_scope(v8::Isolate::GetCurrent());
- v8::Persistent<v8::Context> ctx1 = v8::Context::New();
- v8::Persistent<v8::Context> ctx2 = v8::Context::New();
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope outer_scope(isolate);
+ v8::Persistent<v8::Context> ctx1;
+ v8::Persistent<v8::Context> ctx2;
+ {
+ v8::HandleScope scope(isolate);
+ ctx1.Reset(isolate, v8::Context::New(isolate));
+ ctx2.Reset(isolate, v8::Context::New(isolate));
+ }
ctx1->Enter();
HEAP->CollectAllAvailableGarbage();
@@ -1765,9 +1789,15 @@
TEST(LeakNativeContextViaMapProto) {
i::FLAG_allow_natives_syntax = true;
- v8::HandleScope outer_scope(v8::Isolate::GetCurrent());
- v8::Persistent<v8::Context> ctx1 = v8::Context::New();
- v8::Persistent<v8::Context> ctx2 = v8::Context::New();
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope outer_scope(isolate);
+ v8::Persistent<v8::Context> ctx1;
+ v8::Persistent<v8::Context> ctx2;
+ {
+ v8::HandleScope scope(isolate);
+ ctx1.Reset(isolate, v8::Context::New(isolate));
+ ctx2.Reset(isolate, v8::Context::New(isolate));
+ }
ctx1->Enter();
HEAP->CollectAllAvailableGarbage();
@@ -2373,7 +2403,7 @@
// Check size.
DescriptorArray* descriptors = internal_obj->map()->instance_descriptors();
ObjectHashTable* hashtable = ObjectHashTable::cast(
- internal_obj->FastPropertyAt(descriptors->GetFieldIndex(0)));
+ internal_obj->RawFastPropertyAt(descriptors->GetFieldIndex(0)));
// HashTable header (5) and 4 initial entries (8).
CHECK_LE(hashtable->SizeFor(hashtable->length()), 13 * kPointerSize);
}
diff --git a/test/cctest/test-lockers.cc b/test/cctest/test-lockers.cc
index a619c85..ca0f073 100644
--- a/test/cctest/test-lockers.cc
+++ b/test/cctest/test-lockers.cc
@@ -27,6 +27,9 @@
#include <limits.h>
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+
#include "v8.h"
#include "api.h"
@@ -70,7 +73,7 @@
v8::Isolate::Scope isolate_scope(isolate_);
CHECK_EQ(isolate_, v8::internal::Isolate::Current());
v8::HandleScope scope(isolate_);
- v8::Context::Scope context_scope(context_);
+ v8::Context::Scope context_scope(isolate_, context_);
Local<Value> v = CompileRun("getValue()");
CHECK(v->IsNumber());
CHECK_EQ(30, static_cast<int>(v->NumberValue()));
@@ -78,8 +81,8 @@
{
v8::Locker locker(isolate_);
v8::Isolate::Scope isolate_scope(isolate_);
- v8::Context::Scope context_scope(context_);
v8::HandleScope scope(isolate_);
+ v8::Context::Scope context_scope(isolate_, context_);
Local<Value> v = CompileRun("getValue()");
CHECK(v->IsNumber());
CHECK_EQ(30, static_cast<int>(v->NumberValue()));
@@ -228,7 +231,7 @@
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::Context> context = v8::Context::New();
+ v8::Handle<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
CHECK_EQ(isolate, v8::internal::Isolate::Current());
CalcFibAndCheck();
@@ -349,7 +352,7 @@
v8::Locker lock(isolate_);
v8::Isolate::Scope isolate_scope(isolate_);
HandleScope handle_scope(isolate_);
- v8::Context::Scope context_scope(context_);
+ v8::Context::Scope context_scope(isolate_, context_);
CalcFibAndCheck();
}
private:
@@ -470,19 +473,20 @@
}
virtual void Run() {
- Persistent<v8::Context> context1;
- Persistent<v8::Context> context2;
+ i::SmartPointer<LockIsolateAndCalculateFibSharedContextThread> thread;
v8::Locker lock1(isolate1_);
CHECK(v8::Locker::IsLocked(isolate1_));
CHECK(!v8::Locker::IsLocked(isolate2_));
{
v8::Isolate::Scope isolate_scope(isolate1_);
v8::HandleScope handle_scope(isolate1_);
- context1 = v8::Context::New();
+ v8::Handle<v8::Context> context1 = v8::Context::New(isolate1_);
{
v8::Context::Scope context_scope(context1);
CalcFibAndCheck();
}
+ thread.Reset(new LockIsolateAndCalculateFibSharedContextThread(
+ isolate1_, context1));
}
v8::Locker lock2(isolate2_);
CHECK(v8::Locker::IsLocked(isolate1_));
@@ -490,26 +494,14 @@
{
v8::Isolate::Scope isolate_scope(isolate2_);
v8::HandleScope handle_scope(isolate2_);
- context2 = v8::Context::New();
+ v8::Handle<v8::Context> context2 = v8::Context::New(isolate2_);
{
v8::Context::Scope context_scope(context2);
CalcFibAndCheck();
}
- }
- {
- i::SmartPointer<LockIsolateAndCalculateFibSharedContextThread> thread;
- {
- CHECK(v8::Locker::IsLocked(isolate1_));
- v8::Isolate::Scope isolate_scope(isolate1_);
- v8::HandleScope handle_scope(isolate1_);
- thread.Reset(new LockIsolateAndCalculateFibSharedContextThread(
- isolate1_, v8::Local<v8::Context>::New(isolate1_, context1)));
- }
v8::Unlocker unlock1(isolate1_);
CHECK(!v8::Locker::IsLocked(isolate1_));
CHECK(v8::Locker::IsLocked(isolate2_));
- v8::Isolate::Scope isolate_scope(isolate2_);
- v8::HandleScope handle_scope(isolate2_);
v8::Context::Scope context_scope(context2);
thread->Start();
CalcFibAndCheck();
@@ -535,7 +527,7 @@
class LockUnlockLockThread : public JoinableThread {
public:
- LockUnlockLockThread(v8::Isolate* isolate, v8::Local<v8::Context> context)
+ LockUnlockLockThread(v8::Isolate* isolate, v8::Handle<v8::Context> context)
: JoinableThread("LockUnlockLockThread"),
isolate_(isolate),
context_(isolate, context) {
@@ -548,7 +540,7 @@
{
v8::Isolate::Scope isolate_scope(isolate_);
v8::HandleScope handle_scope(isolate_);
- v8::Context::Scope context_scope(context_);
+ v8::Context::Scope context_scope(isolate_, context_);
CalcFibAndCheck();
}
{
@@ -561,7 +553,7 @@
v8::HandleScope handle_scope(isolate_);
CHECK(v8::Locker::IsLocked(isolate_));
CHECK(!v8::Locker::IsLocked(CcTest::default_isolate()));
- v8::Context::Scope context_scope(context_);
+ v8::Context::Scope context_scope(isolate_, context_);
CalcFibAndCheck();
}
}
@@ -580,16 +572,15 @@
const int kNThreads = 100;
#endif
v8::Isolate* isolate = v8::Isolate::New();
- Persistent<v8::Context> context;
i::List<JoinableThread*> threads(kNThreads);
{
v8::Locker locker_(isolate);
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
- context = v8::Context::New();
+ v8::Handle<v8::Context> context = v8::Context::New(isolate);
for (int i = 0; i < kNThreads; i++) {
threads.Add(new LockUnlockLockThread(
- isolate, v8::Local<v8::Context>::New(isolate, context)));
+ isolate, context));
}
}
StartJoinAndDeleteThreads(threads);
@@ -598,7 +589,7 @@
class LockUnlockLockDefaultIsolateThread : public JoinableThread {
public:
- explicit LockUnlockLockDefaultIsolateThread(v8::Local<v8::Context> context)
+ explicit LockUnlockLockDefaultIsolateThread(v8::Handle<v8::Context> context)
: JoinableThread("LockUnlockLockDefaultIsolateThread"),
context_(CcTest::default_isolate(), context) {}
@@ -606,7 +597,7 @@
v8::Locker lock1(CcTest::default_isolate());
{
v8::HandleScope handle_scope(CcTest::default_isolate());
- v8::Context::Scope context_scope(context_);
+ v8::Context::Scope context_scope(CcTest::default_isolate(), context_);
CalcFibAndCheck();
}
{
@@ -614,7 +605,7 @@
{
v8::Locker lock2(CcTest::default_isolate());
v8::HandleScope handle_scope(CcTest::default_isolate());
- v8::Context::Scope context_scope(context_);
+ v8::Context::Scope context_scope(CcTest::default_isolate(), context_);
CalcFibAndCheck();
}
}
@@ -631,15 +622,14 @@
#else
const int kNThreads = 100;
#endif
- Persistent<v8::Context> context;
+ Local<v8::Context> context;
i::List<JoinableThread*> threads(kNThreads);
{
v8::Locker locker_(CcTest::default_isolate());
v8::HandleScope handle_scope(CcTest::default_isolate());
- context = v8::Context::New();
+ context = v8::Context::New(CcTest::default_isolate());
for (int i = 0; i < kNThreads; i++) {
- threads.Add(new LockUnlockLockDefaultIsolateThread(
- v8::Local<v8::Context>::New(CcTest::default_isolate(), context)));
+ threads.Add(new LockUnlockLockDefaultIsolateThread(context));
}
}
StartJoinAndDeleteThreads(threads);
@@ -653,13 +643,12 @@
v8::Locker lock(isolate);
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
- v8::Persistent<Context> context = v8::Context::New();
+ v8::Handle<Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
v8::Handle<String> source = v8::String::New("1+1");
v8::Handle<Script> script = v8::Script::Compile(source);
v8::Handle<Value> result = script->Run();
v8::String::AsciiValue ascii(result);
- context.Dispose(isolate);
}
isolate->Dispose();
}
@@ -685,9 +674,9 @@
v8::Isolate::Scope isolate_scope(isolate);
CHECK(!i::Isolate::Current()->has_installed_extensions());
v8::ExtensionConfiguration extensions(count_, extension_names_);
- v8::Persistent<v8::Context> context = v8::Context::New(&extensions);
+ v8::HandleScope handle_scope(isolate);
+ v8::Context::New(isolate, &extensions);
CHECK(i::Isolate::Current()->has_installed_extensions());
- context.Dispose(isolate);
}
isolate->Dispose();
}
diff --git a/test/cctest/test-log-stack-tracer.cc b/test/cctest/test-log-stack-tracer.cc
index ca6c7ae..3c34ede 100644
--- a/test/cctest/test-log-stack-tracer.cc
+++ b/test/cctest/test-log-stack-tracer.cc
@@ -29,6 +29,10 @@
#include <stdlib.h>
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+
#include "v8.h"
#include "api.h"
diff --git a/test/cctest/test-log.cc b/test/cctest/test-log.cc
index 89b11c9..3288fc8 100644
--- a/test/cctest/test-log.cc
+++ b/test/cctest/test-log.cc
@@ -34,6 +34,11 @@
#include <cmath>
#endif // __linux__
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+
#include "v8.h"
#include "log.h"
#include "cpu-profiler.h"
@@ -62,7 +67,7 @@
// Need to run this prior to creating the scope.
trick_to_run_init_flags_(init_flags_(prof_lazy)),
scope_(v8::Isolate::GetCurrent()),
- env_(v8::Context::New()),
+ env_(v8::Context::New(v8::Isolate::GetCurrent())),
logger_(i::Isolate::Current()->logger()) {
env_->Enter();
}
@@ -371,7 +376,7 @@
TEST(Issue23768) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
- v8::Handle<v8::Context> env = v8::Context::New();
+ v8::Handle<v8::Context> env = v8::Context::New(v8::Isolate::GetCurrent());
env->Enter();
SimpleExternalString source_ext_str("(function ext() {})();");
@@ -405,7 +410,8 @@
v8::FunctionTemplate::New());
obj->SetClassName(v8_str("Obj"));
v8::Handle<v8::ObjectTemplate> proto = obj->PrototypeTemplate();
- v8::Local<v8::Signature> signature = v8::Signature::New(obj);
+ v8::Local<v8::Signature> signature =
+ v8::Signature::New(v8::Handle<v8::FunctionTemplate>(*obj));
proto->Set(v8_str("method1"),
v8::FunctionTemplate::New(ObjMethod1,
v8::Handle<v8::Value>(),
@@ -563,9 +569,9 @@
// The result either be a "true" literal or problem description.
if (!result->IsTrue()) {
v8::Local<v8::String> s = result->ToString();
- i::ScopedVector<char> data(s->Length() + 1);
+ i::ScopedVector<char> data(s->Utf8Length() + 1);
CHECK_NE(NULL, data.start());
- s->WriteAscii(data.start());
+ s->WriteUtf8(data.start());
printf("%s\n", data.start());
// Make sure that our output is written prior crash due to CHECK failure.
fflush(stdout);
diff --git a/test/cctest/test-mark-compact.cc b/test/cctest/test-mark-compact.cc
index 0cc68eb..2cb4646 100644
--- a/test/cctest/test-mark-compact.cc
+++ b/test/cctest/test-mark-compact.cc
@@ -35,6 +35,11 @@
#include <errno.h>
#endif
+
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+
#include "v8.h"
#include "global-handles.h"
@@ -299,11 +304,11 @@
static int NumberOfWeakCalls = 0;
static void WeakPointerCallback(v8::Isolate* isolate,
- v8::Persistent<v8::Value> handle,
+ v8::Persistent<v8::Value>* handle,
void* id) {
ASSERT(id == reinterpret_cast<void*>(1234));
NumberOfWeakCalls++;
- handle.Dispose(isolate);
+ handle->Dispose(isolate);
}
TEST(ObjectGroups) {
@@ -322,16 +327,16 @@
global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
global_handles->MakeWeak(g1s1.location(),
reinterpret_cast<void*>(1234),
- NULL,
- &WeakPointerCallback);
+ &WeakPointerCallback,
+ NULL);
global_handles->MakeWeak(g1s2.location(),
reinterpret_cast<void*>(1234),
- NULL,
- &WeakPointerCallback);
+ &WeakPointerCallback,
+ NULL);
global_handles->MakeWeak(g1c1.location(),
reinterpret_cast<void*>(1234),
- NULL,
- &WeakPointerCallback);
+ &WeakPointerCallback,
+ NULL);
Handle<Object> g2s1 =
global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
@@ -341,16 +346,16 @@
global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
global_handles->MakeWeak(g2s1.location(),
reinterpret_cast<void*>(1234),
- NULL,
- &WeakPointerCallback);
+ &WeakPointerCallback,
+ NULL);
global_handles->MakeWeak(g2s2.location(),
reinterpret_cast<void*>(1234),
- NULL,
- &WeakPointerCallback);
+ &WeakPointerCallback,
+ NULL);
global_handles->MakeWeak(g2c1.location(),
reinterpret_cast<void*>(1234),
- NULL,
- &WeakPointerCallback);
+ &WeakPointerCallback,
+ NULL);
Handle<Object> root = global_handles->Create(*g1s1); // make a root.
@@ -379,8 +384,8 @@
// Weaken the root.
global_handles->MakeWeak(root.location(),
reinterpret_cast<void*>(1234),
- NULL,
- &WeakPointerCallback);
+ &WeakPointerCallback,
+ NULL);
// But make children strong roots---all the objects (except for children)
// should be collectable now.
global_handles->ClearWeakness(g1c1.location());
@@ -408,12 +413,12 @@
// And now make children weak again and collect them.
global_handles->MakeWeak(g1c1.location(),
reinterpret_cast<void*>(1234),
- NULL,
- &WeakPointerCallback);
+ &WeakPointerCallback,
+ NULL);
global_handles->MakeWeak(g2c1.location(),
reinterpret_cast<void*>(1234),
- NULL,
- &WeakPointerCallback);
+ &WeakPointerCallback,
+ NULL);
HEAP->CollectGarbage(OLD_POINTER_SPACE);
CHECK_EQ(7, NumberOfWeakCalls);
diff --git a/test/cctest/test-object-observe.cc b/test/cctest/test-object-observe.cc
index 49007ba..9eb6f3c 100644
--- a/test/cctest/test-object-observe.cc
+++ b/test/cctest/test-object-observe.cc
@@ -57,7 +57,7 @@
TEST(PerIsolateState) {
HarmonyIsolate isolate;
HandleScope scope(isolate.GetIsolate());
- LocalContext context1;
+ v8::LocalContext context1;
CompileRun(
"var count = 0;"
"var calls = 0;"
@@ -70,20 +70,20 @@
"(function() { obj.foo = 'bar'; })");
Handle<Value> notify_fun2;
{
- LocalContext context2;
+ v8::LocalContext context2;
context2->Global()->Set(String::New("obj"), obj);
notify_fun2 = CompileRun(
"(function() { obj.foo = 'baz'; })");
}
Handle<Value> notify_fun3;
{
- LocalContext context3;
+ v8::LocalContext context3;
context3->Global()->Set(String::New("obj"), obj);
notify_fun3 = CompileRun(
"(function() { obj.foo = 'bat'; })");
}
{
- LocalContext context4;
+ v8::LocalContext context4;
context4->Global()->Set(String::New("observer"), observer);
context4->Global()->Set(String::New("fun1"), notify_fun1);
context4->Global()->Set(String::New("fun2"), notify_fun2);
@@ -97,7 +97,7 @@
TEST(EndOfMicrotaskDelivery) {
HarmonyIsolate isolate;
HandleScope scope(isolate.GetIsolate());
- LocalContext context;
+ v8::LocalContext context;
CompileRun(
"var obj = {};"
"var count = 0;"
@@ -110,7 +110,7 @@
TEST(DeliveryOrdering) {
HarmonyIsolate isolate;
HandleScope scope(isolate.GetIsolate());
- LocalContext context;
+ v8::LocalContext context;
CompileRun(
"var obj1 = {};"
"var obj2 = {};"
@@ -141,7 +141,7 @@
TEST(DeliveryOrderingReentrant) {
HarmonyIsolate isolate;
HandleScope scope(isolate.GetIsolate());
- LocalContext context;
+ v8::LocalContext context;
CompileRun(
"var obj = {};"
"var reentered = false;"
@@ -172,7 +172,7 @@
TEST(DeliveryOrderingDeliverChangeRecords) {
HarmonyIsolate isolate;
HandleScope scope(isolate.GetIsolate());
- LocalContext context;
+ v8::LocalContext context;
CompileRun(
"var obj = {};"
"var ordering = [];"
@@ -197,14 +197,14 @@
HarmonyIsolate isolate;
HandleScope scope(isolate.GetIsolate());
// Initializing this context sets up initial hash tables.
- LocalContext context;
+ v8::LocalContext context;
Handle<Value> obj = CompileRun("obj = {};");
Handle<Value> observer = CompileRun(
"var ran = false;"
"(function() { ran = true })");
{
// As does initializing this context.
- LocalContext context2;
+ v8::LocalContext context2;
context2->Global()->Set(String::New("obj"), obj);
context2->Global()->Set(String::New("observer"), observer);
CompileRun(
@@ -224,7 +224,7 @@
TEST(GlobalObjectObservation) {
HarmonyIsolate isolate;
- LocalContext context;
+ v8::LocalContext context;
HandleScope scope(isolate.GetIsolate());
Handle<Object> global_proxy = context->Global();
Handle<Object> inner_global = global_proxy->GetPrototype().As<Object>();
@@ -256,7 +256,7 @@
// to the old context.
context->DetachGlobal();
{
- LocalContext context2;
+ v8::LocalContext context2;
context2->DetachGlobal();
context2->ReattachGlobal(global_proxy);
CompileRun(
@@ -271,7 +271,7 @@
// Attaching by passing to Context::New
{
// Delegates to Context::New
- LocalContext context3(NULL, Handle<ObjectTemplate>(), global_proxy);
+ v8::LocalContext context3(NULL, Handle<ObjectTemplate>(), global_proxy);
CompileRun(
"var records3 = [];"
"Object.observe(this, function(r) { [].push.apply(records3, r) });"
@@ -320,7 +320,7 @@
TEST(APITestBasicMutation) {
HarmonyIsolate isolate;
HandleScope scope(isolate.GetIsolate());
- LocalContext context;
+ v8::LocalContext context;
Handle<Object> obj = Handle<Object>::Cast(CompileRun(
"var records = [];"
"var obj = {};"
@@ -363,7 +363,7 @@
TEST(HiddenPrototypeObservation) {
HarmonyIsolate isolate;
HandleScope scope(isolate.GetIsolate());
- LocalContext context;
+ v8::LocalContext context;
Handle<FunctionTemplate> tmpl = FunctionTemplate::New();
tmpl->SetHiddenPrototype(true);
tmpl->InstanceTemplate()->Set(String::New("foo"), Number::New(75));
@@ -412,7 +412,7 @@
TEST(ObservationWeakMap) {
HarmonyIsolate isolate;
HandleScope scope(isolate.GetIsolate());
- LocalContext context;
+ v8::LocalContext context;
CompileRun(
"var obj = {};"
"Object.observe(obj, function(){});"
diff --git a/test/cctest/test-parsing.cc b/test/cctest/test-parsing.cc
index 3ea1fb1..05fea0b 100644
--- a/test/cctest/test-parsing.cc
+++ b/test/cctest/test-parsing.cc
@@ -29,6 +29,9 @@
#include <stdio.h>
#include <string.h>
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+
#include "v8.h"
#include "cctest.h"
@@ -173,8 +176,9 @@
TEST(Preparsing) {
- v8::HandleScope handles(v8::Isolate::GetCurrent());
- v8::Persistent<v8::Context> context = v8::Context::New();
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope handles(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
int marker;
i::Isolate::Current()->stack_guard()->SetStackLimit(
@@ -539,8 +543,9 @@
TEST(CharacterStreams) {
- v8::HandleScope handles(v8::Isolate::GetCurrent());
- v8::Persistent<v8::Context> context = v8::Context::New();
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope handles(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
TestCharacterStream("abc\0\n\r\x7f", 7);
@@ -984,7 +989,7 @@
};
v8::HandleScope handles(v8::Isolate::GetCurrent());
- v8::Persistent<v8::Context> context = v8::Context::New();
+ v8::Handle<v8::Context> context = v8::Context::New(v8::Isolate::GetCurrent());
v8::Context::Scope context_scope(context);
int marker;
@@ -1243,7 +1248,7 @@
if (i::FLAG_stress_compaction) return;
v8::HandleScope handles(v8::Isolate::GetCurrent());
- v8::Persistent<v8::Context> context = v8::Context::New();
+ v8::Handle<v8::Context> context = v8::Context::New(v8::Isolate::GetCurrent());
v8::Context::Scope context_scope(context);
int marker;
@@ -1284,7 +1289,8 @@
v8::internal::FLAG_min_preparse_length = 1; // Force preparsing.
v8::V8::Initialize();
v8::HandleScope scope(v8::Isolate::GetCurrent());
- v8::Context::Scope context_scope(v8::Context::New());
+ v8::Context::Scope context_scope(
+ v8::Context::New(v8::Isolate::GetCurrent()));
v8::TryCatch try_catch;
const char* script =
"\"use strict\"; \n"
diff --git a/test/cctest/test-profile-generator.cc b/test/cctest/test-profile-generator.cc
index 85adeca..70b34e3 100644
--- a/test/cctest/test-profile-generator.cc
+++ b/test/cctest/test-profile-generator.cc
@@ -27,6 +27,9 @@
//
// Tests of profiles generator and utilities.
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+
#include "v8.h"
#include "profile-generator-inl.h"
#include "cctest.h"
@@ -852,7 +855,6 @@
static ProfilerExtension kProfilerExtension;
v8::DeclareExtension kProfilerExtensionDeclaration(&kProfilerExtension);
-static v8::Persistent<v8::Context> env;
static const ProfileNode* PickChild(const ProfileNode* parent,
const char* name) {
@@ -869,14 +871,12 @@
// don't appear in the stack trace.
i::FLAG_use_inlining = false;
- if (env.IsEmpty()) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
- const char* extensions[] = { "v8/profiler" };
- v8::ExtensionConfiguration config(1, extensions);
- env = v8::Context::New(&config);
- }
- v8::HandleScope scope(v8::Isolate::GetCurrent());
- env->Enter();
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope scope(isolate);
+ const char* extensions[] = { "v8/profiler" };
+ v8::ExtensionConfiguration config(1, extensions);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ context->Enter();
CpuProfiler* profiler = i::Isolate::Current()->cpu_profiler();
CHECK_EQ(0, profiler->GetProfilesCount());
diff --git a/test/cctest/test-random.cc b/test/cctest/test-random.cc
index 9394fc9..0837ab3 100644
--- a/test/cctest/test-random.cc
+++ b/test/cctest/test-random.cc
@@ -25,6 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
#include "v8.h"
@@ -36,8 +39,6 @@
using namespace v8::internal;
-static v8::Persistent<v8::Context> env;
-
void SetSeeds(Handle<ByteArray> seeds, uint32_t state0, uint32_t state1) {
for (int i = 0; i < 4; i++) {
@@ -70,11 +71,12 @@
TEST(CrankshaftRandom) {
- if (env.IsEmpty()) env = v8::Context::New();
+ v8::V8::Initialize();
// Skip test if crankshaft is disabled.
if (!V8::UseCrankshaft()) return;
- v8::HandleScope scope(env->GetIsolate());
- env->Enter();
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope scope(isolate);
+ v8::Context::Scope context_scope(v8::Context::New(isolate));
Handle<Context> context(Isolate::Current()->context());
Handle<JSObject> global(context->global_object());
diff --git a/test/cctest/test-regexp.cc b/test/cctest/test-regexp.cc
index c4175cb..f311dcc 100644
--- a/test/cctest/test-regexp.cc
+++ b/test/cctest/test-regexp.cc
@@ -28,6 +28,10 @@
#include <stdlib.h>
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+
#include "v8.h"
#include "ast.h"
@@ -707,19 +711,17 @@
class ContextInitializer {
public:
ContextInitializer()
- : env_(),
- scope_(v8::Isolate::GetCurrent()),
+ : scope_(v8::Isolate::GetCurrent()),
+ env_(v8::Context::New(v8::Isolate::GetCurrent())),
zone_(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT) {
- env_ = v8::Context::New();
env_->Enter();
}
~ContextInitializer() {
env_->Exit();
- env_.Dispose(env_->GetIsolate());
}
private:
- v8::Persistent<v8::Context> env_;
v8::HandleScope scope_;
+ v8::Handle<v8::Context> env_;
v8::internal::ZoneScope zone_;
};
diff --git a/test/cctest/test-serialize.cc b/test/cctest/test-serialize.cc
index 777c769..0cf8044 100644
--- a/test/cctest/test-serialize.cc
+++ b/test/cctest/test-serialize.cc
@@ -28,6 +28,11 @@
#include <signal.h>
#include "sys/stat.h"
+
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+
#include "v8.h"
#include "debug.h"
@@ -246,8 +251,11 @@
// can be loaded from v8natives.js and their addresses can be processed. This
// will clear the pending fixups array, which would otherwise contain GC roots
// that would confuse the serialization/deserialization process.
- v8::Persistent<v8::Context> env = v8::Context::New();
- env.Dispose(env->GetIsolate());
+ {
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope scope(isolate);
+ v8::Context::New(isolate);
+ }
WriteToFile(FLAG_testing_serialization_file);
}
@@ -299,10 +307,11 @@
// serialization. That doesn't matter. We don't need to be able to
// serialize a snapshot in a VM that is booted from a snapshot.
if (!Snapshot::HaveASnapshotToStartFrom()) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope scope(isolate);
Deserialize();
- v8::Persistent<v8::Context> env = v8::Context::New();
+ v8::Local<v8::Context> env = v8::Context::New(isolate);
env->Enter();
SanityCheck();
@@ -312,10 +321,11 @@
DEPENDENT_TEST(DeserializeFromSecondSerialization, SerializeTwice) {
if (!Snapshot::HaveASnapshotToStartFrom()) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope scope(isolate);
Deserialize();
- v8::Persistent<v8::Context> env = v8::Context::New();
+ v8::Local<v8::Context> env = v8::Context::New(isolate);
env->Enter();
SanityCheck();
@@ -325,10 +335,11 @@
DEPENDENT_TEST(DeserializeAndRunScript2, Serialize) {
if (!Snapshot::HaveASnapshotToStartFrom()) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope scope(isolate);
Deserialize();
- v8::Persistent<v8::Context> env = v8::Context::New();
+ v8::Local<v8::Context> env = v8::Context::New(isolate);
env->Enter();
const char* c_source = "\"1234\".length";
@@ -342,10 +353,11 @@
DEPENDENT_TEST(DeserializeFromSecondSerializationAndRunScript2,
SerializeTwice) {
if (!Snapshot::HaveASnapshotToStartFrom()) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope scope(isolate);
Deserialize();
- v8::Persistent<v8::Context> env = v8::Context::New();
+ v8::Local<v8::Context> env = v8::Context::New(isolate);
env->Enter();
const char* c_source = "\"1234\".length";
@@ -363,7 +375,12 @@
Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
- v8::Persistent<v8::Context> env = v8::Context::New();
+ v8::Persistent<v8::Context> env;
+ {
+ HandleScope scope(isolate);
+ env.Reset(v8::Isolate::GetCurrent(),
+ v8::Context::New(v8::Isolate::GetCurrent()));
+ }
ASSERT(!env.IsEmpty());
env->Enter();
// Make sure all builtin scripts are cached.
@@ -497,7 +514,12 @@
Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
- v8::Persistent<v8::Context> env = v8::Context::New();
+ v8::Persistent<v8::Context> env;
+ {
+ HandleScope scope(isolate);
+ env.Reset(v8::Isolate::GetCurrent(),
+ v8::Context::New(v8::Isolate::GetCurrent()));
+ }
ASSERT(!env.IsEmpty());
env->Enter();
// Make sure all builtin scripts are cached.
diff --git a/test/cctest/test-strings.cc b/test/cctest/test-strings.cc
index 2889172..77e8e1b 100644
--- a/test/cctest/test-strings.cc
+++ b/test/cctest/test-strings.cc
@@ -32,6 +32,10 @@
#include <stdlib.h>
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+
#include "v8.h"
#include "api.h"
diff --git a/test/cctest/test-symbols.cc b/test/cctest/test-symbols.cc
index 6a8323b..e1b3ea7 100644
--- a/test/cctest/test-symbols.cc
+++ b/test/cctest/test-symbols.cc
@@ -5,6 +5,10 @@
// of ConsStrings. These operations may not be very fast, but they
// should be possible without getting errors due to too deep recursion.
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+
#include "v8.h"
#include "cctest.h"
diff --git a/test/cctest/test-thread-termination.cc b/test/cctest/test-thread-termination.cc
index b22c150..4008663 100644
--- a/test/cctest/test-thread-termination.cc
+++ b/test/cctest/test-thread-termination.cc
@@ -25,6 +25,10 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+
#include "v8.h"
#include "platform.h"
#include "cctest.h"
@@ -124,7 +128,8 @@
v8::HandleScope scope(v8::Isolate::GetCurrent());
v8::Handle<v8::ObjectTemplate> global =
CreateGlobalTemplate(TerminateCurrentThread, DoLoop);
- v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
+ v8::Handle<v8::Context> context =
+ v8::Context::New(v8::Isolate::GetCurrent(), NULL, global);
v8::Context::Scope context_scope(context);
CHECK(!v8::V8::IsExecutionTerminating());
// Run a loop that will be infinite if thread termination does not work.
@@ -134,7 +139,6 @@
// Test that we can run the code again after thread termination.
CHECK(!v8::V8::IsExecutionTerminating());
v8::Script::Compile(source)->Run();
- context.Dispose(context->GetIsolate());
}
@@ -144,7 +148,8 @@
v8::HandleScope scope(v8::Isolate::GetCurrent());
v8::Handle<v8::ObjectTemplate> global =
CreateGlobalTemplate(TerminateCurrentThread, DoLoopNoCall);
- v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
+ v8::Handle<v8::Context> context =
+ v8::Context::New(v8::Isolate::GetCurrent(), NULL, global);
v8::Context::Scope context_scope(context);
CHECK(!v8::V8::IsExecutionTerminating());
// Run a loop that will be infinite if thread termination does not work.
@@ -154,7 +159,6 @@
CHECK(!v8::V8::IsExecutionTerminating());
// Test that we can run the code again after thread termination.
v8::Script::Compile(source)->Run();
- context.Dispose(context->GetIsolate());
}
@@ -183,7 +187,8 @@
v8::HandleScope scope(v8::Isolate::GetCurrent());
v8::Handle<v8::ObjectTemplate> global = CreateGlobalTemplate(Signal, DoLoop);
- v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
+ v8::Handle<v8::Context> context =
+ v8::Context::New(v8::Isolate::GetCurrent(), NULL, global);
v8::Context::Scope context_scope(context);
CHECK(!v8::V8::IsExecutionTerminating());
// Run a loop that will be infinite if thread termination does not work.
@@ -194,7 +199,6 @@
thread.Join();
delete semaphore;
semaphore = NULL;
- context.Dispose(context->GetIsolate());
}
@@ -207,14 +211,14 @@
v8_thread_id_ = v8::V8::GetCurrentThreadId();
v8::Handle<v8::ObjectTemplate> global =
CreateGlobalTemplate(Signal, DoLoop);
- v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
+ v8::Handle<v8::Context> context =
+ v8::Context::New(v8::Isolate::GetCurrent(), NULL, global);
v8::Context::Scope context_scope(context);
CHECK(!v8::V8::IsExecutionTerminating());
// Run a loop that will be infinite if thread termination does not work.
v8::Handle<v8::String> source =
v8::String::New("try { loop(); fail(); } catch(e) { fail(); }");
v8::Script::Compile(source)->Run();
- context.Dispose(context->GetIsolate());
}
int GetV8ThreadId() { return v8_thread_id_; }
@@ -314,7 +318,8 @@
global->Set(v8::String::New("loop"),
v8::FunctionTemplate::New(LoopGetProperty));
- v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
+ v8::Handle<v8::Context> context =
+ v8::Context::New(v8::Isolate::GetCurrent(), NULL, global);
v8::Context::Scope context_scope(context);
CHECK(!v8::V8::IsExecutionTerminating());
// Run a loop that will be infinite if thread termination does not work.
@@ -326,7 +331,6 @@
CHECK(!v8::V8::IsExecutionTerminating());
call_count = 0;
v8::Script::Compile(source)->Run();
- context.Dispose(context->GetIsolate());
}
v8::Handle<v8::Value> ReenterAfterTermination(const v8::Arguments& args) {
@@ -360,7 +364,8 @@
v8::HandleScope scope(v8::Isolate::GetCurrent());
v8::Handle<v8::ObjectTemplate> global =
CreateGlobalTemplate(TerminateCurrentThread, ReenterAfterTermination);
- v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
+ v8::Handle<v8::Context> context =
+ v8::Context::New(v8::Isolate::GetCurrent(), NULL, global);
v8::Context::Scope context_scope(context);
CHECK(!v8::V8::IsExecutionTerminating());
v8::Handle<v8::String> source =
@@ -370,7 +375,6 @@
// Check we can run JS again after termination.
CHECK(v8::Script::Compile(v8::String::New("function f() { return true; }"
"f()"))->Run()->IsTrue());
- context.Dispose(context->GetIsolate());
}
v8::Handle<v8::Value> DoLoopCancelTerminate(const v8::Arguments& args) {
@@ -399,13 +403,13 @@
v8::HandleScope scope;
v8::Handle<v8::ObjectTemplate> global =
CreateGlobalTemplate(TerminateCurrentThread, DoLoopCancelTerminate);
- v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
+ v8::Handle<v8::Context> context =
+ v8::Context::New(v8::Isolate::GetCurrent(), NULL, global);
v8::Context::Scope context_scope(context);
CHECK(!v8::V8::IsExecutionTerminating());
v8::Handle<v8::String> source =
v8::String::New("try { doloop(); } catch(e) { fail(); } 'completed';");
// Check that execution completed with correct return value.
CHECK(v8::Script::Compile(source)->Run()->Equals(v8_str("completed")));
- context.Dispose(context->GetIsolate());
}
diff --git a/test/cctest/test-threads.cc b/test/cctest/test-threads.cc
index 5a010a8..edec8bf 100644
--- a/test/cctest/test-threads.cc
+++ b/test/cctest/test-threads.cc
@@ -25,6 +25,10 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// TODO(dcarney): remove
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
+#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
+
#include "v8.h"
#include "platform.h"
@@ -34,10 +38,12 @@
TEST(Preemption) {
- v8::Locker locker(CcTest::default_isolate());
+ v8::Isolate* isolate = CcTest::default_isolate();
+ v8::Locker locker(isolate);
v8::V8::Initialize();
- v8::HandleScope scope(CcTest::default_isolate());
- v8::Context::Scope context_scope(v8::Context::New());
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
v8::Locker::StartPreemption(100);
@@ -67,9 +73,11 @@
public:
ThreadA() : Thread("ThreadA") { }
void Run() {
- v8::Locker locker(CcTest::default_isolate());
- v8::HandleScope scope(CcTest::default_isolate());
- v8::Context::Scope context_scope(v8::Context::New());
+ v8::Isolate* isolate = CcTest::default_isolate();
+ v8::Locker locker(isolate);
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
CHECK_EQ(FILL_CACHE, turn);
@@ -105,10 +113,12 @@
void Run() {
do {
{
- v8::Locker locker(CcTest::default_isolate());
+ v8::Isolate* isolate = CcTest::default_isolate();
+ v8::Locker locker(isolate);
if (turn == CLEAN_CACHE) {
- v8::HandleScope scope(CcTest::default_isolate());
- v8::Context::Scope context_scope(v8::Context::New());
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
// Clear the caches by forcing major GC.
HEAP->CollectAllGarbage(v8::internal::Heap::kNoGCFlags);
diff --git a/test/cctest/test-weakmaps.cc b/test/cctest/test-weakmaps.cc
index 1dd43a8..499286c 100644
--- a/test/cctest/test-weakmaps.cc
+++ b/test/cctest/test-weakmaps.cc
@@ -65,11 +65,11 @@
static int NumberOfWeakCalls = 0;
static void WeakPointerCallback(v8::Isolate* isolate,
- v8::Persistent<v8::Value> handle,
+ v8::Persistent<v8::Value>* handle,
void* id) {
ASSERT(id == reinterpret_cast<void*>(1234));
NumberOfWeakCalls++;
- handle.Dispose(isolate);
+ handle->Dispose(isolate);
}
@@ -114,8 +114,8 @@
HandleScope scope(isolate);
global_handles->MakeWeak(key.location(),
reinterpret_cast<void*>(1234),
- NULL,
- &WeakPointerCallback);
+ &WeakPointerCallback,
+ NULL);
}
CHECK(global_handles->IsWeak(key.location()));
diff --git a/test/mjsunit/allocation-site-info.js b/test/mjsunit/allocation-site-info.js
index d85ce3e..5c7ae13 100644
--- a/test/mjsunit/allocation-site-info.js
+++ b/test/mjsunit/allocation-site-info.js
@@ -284,5 +284,25 @@
assertKind(elements_kind.fast, obj);
obj = newarraycase_list_smiobj(2);
assertKind(elements_kind.fast, obj);
+
+ // Verify that cross context calls work
+ var realmA = Realm.current();
+ var realmB = Realm.create();
+ assertEquals(0, realmA);
+ assertEquals(1, realmB);
+
+ function instanceof_check(type) {
+ assertTrue(new type() instanceof type);
+ assertTrue(new type(5) instanceof type);
+ assertTrue(new type(1,2,3) instanceof type);
+ }
+
+ var realmBArray = Realm.eval(realmB, "Array");
+ instanceof_check(Array);
+ instanceof_check(realmBArray);
+ %OptimizeFunctionOnNextCall(instanceof_check);
+ instanceof_check(Array);
+ instanceof_check(realmBArray);
+ assertTrue(2 != %GetOptimizationStatus(instanceof_check));
}
}
diff --git a/test/mjsunit/external-array-no-sse2.js b/test/mjsunit/external-array-no-sse2.js
index 0b843d8..b3d91a5 100644
--- a/test/mjsunit/external-array-no-sse2.js
+++ b/test/mjsunit/external-array-no-sse2.js
@@ -49,12 +49,6 @@
assertEquals(0, a[0]);
assertEquals(0, a[1]);
-// No-parameter constructor should fail right now.
-function abfunc1() {
- return new ArrayBuffer();
-}
-assertThrows(abfunc1);
-
// Test derivation from an ArrayBuffer
var ab = new ArrayBuffer(12);
assertInstance(ab, ArrayBuffer);
@@ -161,12 +155,10 @@
assertSame(a.buffer, (new Int8Array(a.buffer,3,51)).buffer);
assertInstance(a.buffer, ArrayBuffer);
-// Test the correct behavior of the |BYTES_PER_ELEMENT| property (which is
-// "constant", but not read-only).
+// Test the correct behavior of the |BYTES_PER_ELEMENT| property.
a = new Int32Array(2);
assertEquals(4, a.BYTES_PER_ELEMENT);
a.BYTES_PER_ELEMENT = 42;
-assertEquals(42, a.BYTES_PER_ELEMENT);
a = new Uint8Array(2);
assertEquals(1, a.BYTES_PER_ELEMENT);
a = new Int16Array(2);
@@ -202,15 +194,6 @@
// Test non-number parameters.
var array_with_length_from_non_number = new Int32Array("2");
assertEquals(2, array_with_length_from_non_number.length);
-array_with_length_from_non_number = new Int32Array(undefined);
-assertEquals(0, array_with_length_from_non_number.length);
-var foo = { valueOf: function() { return 3; } };
-array_with_length_from_non_number = new Int32Array(foo);
-assertEquals(3, array_with_length_from_non_number.length);
-foo = { toString: function() { return "4"; } };
-array_with_length_from_non_number = new Int32Array(foo);
-assertEquals(4, array_with_length_from_non_number.length);
-
// Test loads and stores.
types = [Array, Int8Array, Uint8Array, Int16Array, Uint16Array, Int32Array,
@@ -359,8 +342,6 @@
a.length = 2;
assertEquals(kElementCount, a.length);
assertTrue(delete a.length);
- a.length = 2;
- assertEquals(2, a.length);
// Make sure bounds checks are handled correctly for external arrays.
run_bounds_test(a);
@@ -539,8 +520,6 @@
assertThrows(function(){ a.subarray.call({}, 0) });
assertThrows(function(){ a.subarray.call([], 0) });
-assertThrows(function(){ a.subarray.call(a) });
-
// Call constructors directly as functions, and through .call and .apply
diff --git a/test/mjsunit/external-array.js b/test/mjsunit/external-array.js
index 85a8cc5..e61ff45 100644
--- a/test/mjsunit/external-array.js
+++ b/test/mjsunit/external-array.js
@@ -49,12 +49,6 @@
assertEquals(0, a[0]);
assertEquals(0, a[1]);
-// No-parameter constructor should fail right now.
-function abfunc1() {
- return new ArrayBuffer();
-}
-assertThrows(abfunc1);
-
// Test derivation from an ArrayBuffer
var ab = new ArrayBuffer(12);
assertInstance(ab, ArrayBuffer);
@@ -161,12 +155,10 @@
assertSame(a.buffer, (new Int8Array(a.buffer,3,51)).buffer);
assertInstance(a.buffer, ArrayBuffer);
-// Test the correct behavior of the |BYTES_PER_ELEMENT| property (which is
-// "constant", but not read-only).
+// Test the correct behavior of the |BYTES_PER_ELEMENT| property
a = new Int32Array(2);
assertEquals(4, a.BYTES_PER_ELEMENT);
a.BYTES_PER_ELEMENT = 42;
-assertEquals(42, a.BYTES_PER_ELEMENT);
a = new Uint8Array(2);
assertEquals(1, a.BYTES_PER_ELEMENT);
a = new Int16Array(2);
@@ -202,15 +194,6 @@
// Test non-number parameters.
var array_with_length_from_non_number = new Int32Array("2");
assertEquals(2, array_with_length_from_non_number.length);
-array_with_length_from_non_number = new Int32Array(undefined);
-assertEquals(0, array_with_length_from_non_number.length);
-var foo = { valueOf: function() { return 3; } };
-array_with_length_from_non_number = new Int32Array(foo);
-assertEquals(3, array_with_length_from_non_number.length);
-foo = { toString: function() { return "4"; } };
-array_with_length_from_non_number = new Int32Array(foo);
-assertEquals(4, array_with_length_from_non_number.length);
-
// Test loads and stores.
types = [Array, Int8Array, Uint8Array, Int16Array, Uint16Array, Int32Array,
@@ -359,8 +342,6 @@
a.length = 2;
assertEquals(kElementCount, a.length);
assertTrue(delete a.length);
- a.length = 2;
- assertEquals(2, a.length);
// Make sure bounds checks are handled correctly for external arrays.
run_bounds_test(a);
@@ -452,7 +433,6 @@
a[0] = 1;
assertEquals(undefined, a[0]);
-
// Check construction from arrays.
a = new Uint32Array([]);
assertInstance(a, Uint32Array);
@@ -539,8 +519,6 @@
assertThrows(function(){ a.subarray.call({}, 0) });
assertThrows(function(){ a.subarray.call([], 0) });
-assertThrows(function(){ a.subarray.call(a) });
-
// Call constructors directly as functions, and through .call and .apply
diff --git a/test/mjsunit/harmony/generators-iteration.js b/test/mjsunit/harmony/generators-iteration.js
index ba0ae10..d120ac7 100644
--- a/test/mjsunit/harmony/generators-iteration.js
+++ b/test/mjsunit/harmony/generators-iteration.js
@@ -31,19 +31,40 @@
var GeneratorFunction = (function*(){yield 1;}).__proto__.constructor;
+function assertIteratorResult(value, done, result) {
+ assertEquals({ value: value, done: done}, result);
+}
+
+function TestGeneratorResultPrototype() {
+ function* g() { yield 1; }
+ var iter = g();
+ var result = iter.next();
+
+ assertSame(Object.prototype, Object.getPrototypeOf(result));
+ var property_names = Object.getOwnPropertyNames(result);
+ property_names.sort();
+ assertEquals(["done", "value"], property_names);
+ assertIteratorResult(1, false, result);
+}
+TestGeneratorResultPrototype()
+
function TestGenerator(g, expected_values_for_next,
send_val, expected_values_for_send) {
function testNext(thunk) {
var iter = thunk();
for (var i = 0; i < expected_values_for_next.length; i++) {
- assertEquals(expected_values_for_next[i], iter.next());
+ assertIteratorResult(expected_values_for_next[i],
+ i == expected_values_for_next.length - 1,
+ iter.next());
}
assertThrows(function() { iter.next(); }, Error);
}
function testSend(thunk) {
var iter = thunk();
for (var i = 0; i < expected_values_for_send.length; i++) {
- assertEquals(expected_values_for_send[i], iter.send(send_val));
+ assertIteratorResult(expected_values_for_send[i],
+ i == expected_values_for_send.length - 1,
+ iter.send(send_val));
}
assertThrows(function() { iter.send(send_val); }, Error);
}
@@ -51,7 +72,9 @@
for (var i = 0; i < expected_values_for_next.length; i++) {
var iter = thunk();
for (var j = 0; j < i; j++) {
- assertEquals(expected_values_for_next[j], iter.next());
+ assertIteratorResult(expected_values_for_next[j],
+ j == expected_values_for_next.length - 1,
+ iter.next());
}
function Sentinel() {}
assertThrows(function () { iter.throw(new Sentinel); }, Sentinel);
@@ -270,6 +293,225 @@
"foo",
[2, "1foo3", 5, "4foo6", "foofoo"]);
+// Rewind a try context with and without operands on the stack.
+TestGenerator(
+ function* g24() {
+ try {
+ return (yield (1 + (yield 2) + 3)) + (yield (4 + (yield 5) + 6));
+ } catch (e) {
+ throw e;
+ }
+ },
+ [2, NaN, 5, NaN, NaN],
+ "foo",
+ [2, "1foo3", 5, "4foo6", "foofoo"]);
+
+// Yielding in a catch context, with and without operands on the stack.
+TestGenerator(
+ function* g25() {
+ try {
+ throw (yield (1 + (yield 2) + 3))
+ } catch (e) {
+ if (typeof e == 'object') throw e;
+ return e + (yield (4 + (yield 5) + 6));
+ }
+ },
+ [2, NaN, 5, NaN, NaN],
+ "foo",
+ [2, "1foo3", 5, "4foo6", "foofoo"]);
+
+function TestTryCatch() {
+ function* g() { yield 1; try { yield 2; } catch (e) { yield e; } yield 3; }
+ function Sentinel() {}
+ var iter;
+
+ iter = g();
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ assertIteratorResult(3, false, iter.next());
+ assertIteratorResult(undefined, true, iter.next());
+ assertThrows(function() { iter.next(); }, Error);
+
+ iter = g();
+ assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
+ assertThrows(function() { iter.next(); }, Error);
+
+ iter = g();
+ assertIteratorResult(1, false, iter.next());
+ assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
+ assertThrows(function() { iter.next(); }, Error);
+
+ iter = g();
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ var exn = new Sentinel;
+ assertIteratorResult(exn, false, iter.throw(exn));
+ assertIteratorResult(3, false, iter.next());
+ assertIteratorResult(undefined, true, iter.next());
+ assertThrows(function() { iter.next(); }, Error);
+
+ iter = g();
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ var exn = new Sentinel;
+ assertIteratorResult(exn, false, iter.throw(exn));
+ assertIteratorResult(3, false, iter.next());
+ assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
+ assertThrows(function() { iter.next(); }, Error);
+
+ iter = g();
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ var exn = new Sentinel;
+ assertIteratorResult(exn, false, iter.throw(exn));
+ assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
+ assertThrows(function() { iter.next(); }, Error);
+
+ iter = g();
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ assertIteratorResult(3, false, iter.next());
+ assertIteratorResult(undefined, true, iter.next());
+ assertThrows(function() { iter.next(); }, Error);
+}
+TestTryCatch();
+
+function TestTryFinally() {
+ function* g() { yield 1; try { yield 2; } finally { yield 3; } yield 4; }
+ function Sentinel() {}
+ function Sentinel2() {}
+ var iter;
+
+ iter = g();
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ assertIteratorResult(3, false, iter.next());
+ assertIteratorResult(4, false, iter.next());
+ assertIteratorResult(undefined, true, iter.next());
+ assertThrows(function() { iter.next(); }, Error);
+
+ iter = g();
+ assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
+ assertThrows(function() { iter.next(); }, Error);
+
+ iter = g();
+ assertIteratorResult(1, false, iter.next());
+ assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
+ assertThrows(function() { iter.next(); }, Error);
+
+ iter = g();
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ assertIteratorResult(3, false, iter.throw(new Sentinel));
+ assertThrows(function() { iter.next(); }, Sentinel);
+ assertThrows(function() { iter.next(); }, Error);
+
+ iter = g();
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ assertIteratorResult(3, false, iter.throw(new Sentinel));
+ assertThrows(function() { iter.throw(new Sentinel2); }, Sentinel2);
+ assertThrows(function() { iter.next(); }, Error);
+
+ iter = g();
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ assertIteratorResult(3, false, iter.next());
+ assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
+ assertThrows(function() { iter.next(); }, Error);
+
+ iter = g();
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ assertIteratorResult(3, false, iter.next());
+ assertIteratorResult(4, false, iter.next());
+ assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
+ assertThrows(function() { iter.next(); }, Error);
+
+ iter = g();
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ assertIteratorResult(3, false, iter.next());
+ assertIteratorResult(4, false, iter.next());
+ assertIteratorResult(undefined, true, iter.next());
+ assertThrows(function() { iter.next(); }, Error);
+}
+TestTryFinally();
+
+function TestNestedTry() {
+ function* g() {
+ try {
+ yield 1;
+ try { yield 2; } catch (e) { yield e; }
+ yield 3;
+ } finally {
+ yield 4;
+ }
+ yield 5;
+ }
+ function Sentinel() {}
+ function Sentinel2() {}
+ var iter;
+
+ iter = g();
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ assertIteratorResult(3, false, iter.next());
+ assertIteratorResult(4, false, iter.next());
+ assertIteratorResult(5, false, iter.next());
+ assertIteratorResult(undefined, true, iter.next());
+ assertThrows(function() { iter.next(); }, Error);
+
+ iter = g();
+ assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
+ assertThrows(function() { iter.next(); }, Error);
+
+ iter = g();
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(4, false, iter.throw(new Sentinel));
+ assertThrows(function() { iter.next(); }, Sentinel);
+ assertThrows(function() { iter.next(); }, Error);
+
+ iter = g();
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(4, false, iter.throw(new Sentinel));
+ assertThrows(function() { iter.throw(new Sentinel2); }, Sentinel2);
+ assertThrows(function() { iter.next(); }, Error);
+
+ iter = g();
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ var exn = new Sentinel;
+ assertIteratorResult(exn, false, iter.throw(exn));
+ assertIteratorResult(3, false, iter.next());
+ assertIteratorResult(4, false, iter.next());
+ assertIteratorResult(5, false, iter.next());
+ assertIteratorResult(undefined, true, iter.next());
+ assertThrows(function() { iter.next(); }, Error);
+
+ iter = g();
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ var exn = new Sentinel;
+ assertIteratorResult(exn, false, iter.throw(exn));
+ assertIteratorResult(4, false, iter.throw(new Sentinel2));
+ assertThrows(function() { iter.next(); }, Sentinel2);
+ assertThrows(function() { iter.next(); }, Error);
+
+ iter = g();
+ assertIteratorResult(1, false, iter.next());
+ assertIteratorResult(2, false, iter.next());
+ var exn = new Sentinel;
+ assertIteratorResult(exn, false, iter.throw(exn));
+ assertIteratorResult(3, false, iter.next());
+ assertIteratorResult(4, false, iter.throw(new Sentinel2));
+ assertThrows(function() { iter.next(); }, Sentinel2);
+ assertThrows(function() { iter.next(); }, Error);
+
+ // That's probably enough.
+}
+TestNestedTry();
+
function TestRecursion() {
function TestNextRecursion() {
function* g() { yield iter.next(); }
diff --git a/test/mjsunit/harmony/generators-runtime.js b/test/mjsunit/harmony/generators-runtime.js
index 0182fc3..b4e8f95 100644
--- a/test/mjsunit/harmony/generators-runtime.js
+++ b/test/mjsunit/harmony/generators-runtime.js
@@ -105,6 +105,8 @@
assertSame(Function, Object.getPrototypeOf(GeneratorFunction));
assertTrue(g instanceof Function);
+ assertEquals("function* g() { yield 1; }", g.toString());
+
// Not all functions are generators.
assertTrue(f instanceof Function); // Sanity check.
assertTrue(!(f instanceof GeneratorFunction));
diff --git a/test/mjsunit/harmony/typedarrays.js b/test/mjsunit/harmony/typedarrays.js
index 2e03985..c5c19e1 100644
--- a/test/mjsunit/harmony/typedarrays.js
+++ b/test/mjsunit/harmony/typedarrays.js
@@ -68,8 +68,16 @@
function TestSlice(expectedResultLen, initialLen, start, end) {
var ab = new ArrayBuffer(initialLen);
+ var a1 = new Uint8Array(ab);
+ for (var i = 0; i < a1.length; i++) {
+ a1[i] = 0xCA;
+ }
var slice = ab.slice(start, end);
assertSame(expectedResultLen, slice.byteLength);
+ var a2 = new Uint8Array(slice);
+ for (var i = 0; i < a2.length; i++) {
+ assertSame(0xCA, a2[i]);
+ }
}
function TestArrayBufferSlice() {
@@ -191,7 +199,16 @@
assertSame(typicalElement, a4[i]);
}
- assertThrows(function () { new proto(ab, 256*elementSize); }, RangeError);
+ var aAtTheEnd = new proto(ab, 256*elementSize);
+ assertSame(elementSize, aAtTheEnd.BYTES_PER_ELEMENT);
+ assertSame(0, aAtTheEnd.length);
+ assertSame(0, aAtTheEnd.byteLength);
+ assertSame(256*elementSize, aAtTheEnd.byteOffset);
+
+ assertThrows(function () { new proto(ab, 257*elementSize); }, RangeError);
+ assertThrows(
+ function () { new proto(ab, 128*elementSize, 192); },
+ RangeError);
if (elementSize !== 1) {
assertThrows(function() { new proto(ab, 128*elementSize - 1, 10); },
@@ -203,8 +220,37 @@
assertThrows(function() { new proto(unalignedArrayBuffer)}, RangeError);
assertThrows(function() { new proto(unalignedArrayBuffer, 5*elementSize)},
RangeError);
+ assertThrows(function() { new proto() }, TypeError);
}
+ var aFromString = new proto("30");
+ assertSame(elementSize, aFromString.BYTES_PER_ELEMENT);
+ assertSame(30, aFromString.length);
+ assertSame(30*elementSize, aFromString.byteLength);
+ assertSame(0, aFromString.byteOffset);
+ assertSame(30*elementSize, aFromString.buffer.byteLength);
+
+ var jsArray = [];
+ for (i = 0; i < 30; i++) {
+ jsArray.push(typicalElement);
+ }
+ var aFromArray = new proto(jsArray);
+ assertSame(elementSize, aFromArray.BYTES_PER_ELEMENT);
+ assertSame(30, aFromArray.length);
+ assertSame(30*elementSize, aFromArray.byteLength);
+ assertSame(0, aFromArray.byteOffset);
+ assertSame(30*elementSize, aFromArray.buffer.byteLength);
+ for (i = 0; i < 30; i++) {
+ assertSame(typicalElement, aFromArray[i]);
+ }
+
+ var abLen0 = new ArrayBuffer(0);
+ var aOverAbLen0 = new proto(abLen0);
+ assertSame(abLen0, aOverAbLen0.buffer);
+ assertSame(elementSize, aOverAbLen0.BYTES_PER_ELEMENT);
+ assertSame(0, aOverAbLen0.length);
+ assertSame(0, aOverAbLen0.byteLength);
+ assertSame(0, aOverAbLen0.byteOffset);
}
TestTypedArray(Uint8Array, 1, 0xFF);
@@ -215,7 +261,201 @@
TestTypedArray(Int32Array, 4, -0x7FFFFFFF);
TestTypedArray(Float32Array, 4, 0.5);
TestTypedArray(Float64Array, 8, 0.5);
+TestTypedArray(Uint8ClampedArray, 1, 0xFF);
+function SubarrayTestCase(constructor, item, expectedResultLen, expectedStartIndex,
+ initialLen, start, end) {
+ var a = new constructor(initialLen);
+ var s = a.subarray(start, end);
+ assertSame(constructor, s.constructor);
+ assertSame(expectedResultLen, s.length);
+ if (s.length > 0) {
+ s[0] = item;
+ assertSame(item, a[expectedStartIndex]);
+ }
+}
+
+function TestSubArray(constructor, item) {
+ SubarrayTestCase(constructor, item, 512, 512, 1024, 512, 1024);
+ SubarrayTestCase(constructor, item, 512, 512, 1024, 512);
+
+ SubarrayTestCase(constructor, item, 0, undefined, 0, 1, 20);
+ SubarrayTestCase(constructor, item, 100, 0, 100, 0, 100);
+ SubarrayTestCase(constructor, item, 100, 0, 100, 0, 1000);
+ SubarrayTestCase(constructor, item, 0, undefined, 100, 5, 1);
+
+ SubarrayTestCase(constructor, item, 1, 89, 100, -11, -10);
+ SubarrayTestCase(constructor, item, 9, 90, 100, -10, 99);
+ SubarrayTestCase(constructor, item, 0, undefined, 100, -10, 80);
+ SubarrayTestCase(constructor, item, 10,80, 100, 80, -10);
+
+ SubarrayTestCase(constructor, item, 10,90, 100, 90, "100");
+ SubarrayTestCase(constructor, item, 10,90, 100, "90", "100");
+
+ SubarrayTestCase(constructor, item, 0, undefined, 100, 90, "abc");
+ SubarrayTestCase(constructor, item, 10,0, 100, "abc", 10);
+
+ SubarrayTestCase(constructor, item, 10,0, 100, 0.96, 10.96);
+ SubarrayTestCase(constructor, item, 10,0, 100, 0.96, 10.01);
+ SubarrayTestCase(constructor, item, 10,0, 100, 0.01, 10.01);
+ SubarrayTestCase(constructor, item, 10,0, 100, 0.01, 10.96);
+
+
+ SubarrayTestCase(constructor, item, 10,90, 100, 90);
+ SubarrayTestCase(constructor, item, 10,90, 100, -10);
+}
+
+TestSubArray(Uint8Array, 0xFF);
+TestSubArray(Int8Array, -0x7F);
+TestSubArray(Uint16Array, 0xFFFF);
+TestSubArray(Int16Array, -0x7FFF);
+TestSubArray(Uint32Array, 0xFFFFFFFF);
+TestSubArray(Int32Array, -0x7FFFFFFF);
+TestSubArray(Float32Array, 0.5);
+TestSubArray(Float64Array, 0.5);
+TestSubArray(Uint8ClampedArray, 0xFF);
+
+function TestTypedArrayOutOfRange(constructor, value, result) {
+ var a = new constructor(1);
+ a[0] = value;
+ assertSame(result, a[0]);
+}
+
+TestTypedArrayOutOfRange(Uint8Array, 0x1FA, 0xFA);
+TestTypedArrayOutOfRange(Uint8Array, -1, 0xFF);
+
+TestTypedArrayOutOfRange(Int8Array, 0x1FA, 0x7A - 0x80);
+
+TestTypedArrayOutOfRange(Uint16Array, 0x1FFFA, 0xFFFA);
+TestTypedArrayOutOfRange(Uint16Array, -1, 0xFFFF);
+TestTypedArrayOutOfRange(Int16Array, 0x1FFFA, 0x7FFA - 0x8000);
+
+TestTypedArrayOutOfRange(Uint32Array, 0x1FFFFFFFA, 0xFFFFFFFA);
+TestTypedArrayOutOfRange(Uint32Array, -1, 0xFFFFFFFF);
+TestTypedArrayOutOfRange(Int32Array, 0x1FFFFFFFA, 0x7FFFFFFA - 0x80000000);
+
+TestTypedArrayOutOfRange(Uint8ClampedArray, 0x1FA, 0xFF);
+TestTypedArrayOutOfRange(Uint8ClampedArray, -1, 0);
+
+var typedArrayConstructors = [
+ Uint8Array,
+ Int8Array,
+ Uint16Array,
+ Int16Array,
+ Uint32Array,
+ Int32Array,
+ Uint8ClampedArray,
+ Float32Array,
+ Float64Array];
+
+function TestPropertyTypeChecks(constructor) {
+ var a = new constructor();
+ function CheckProperty(name) {
+ var d = Object.getOwnPropertyDescriptor(constructor.prototype, name);
+ var o = {}
+ assertThrows(function() {d.get.call(o);}, TypeError);
+ d.get.call(a); // shouldn't throw
+ for (var i = 0 ; i < typedArrayConstructors.length; i++) {
+ d.get.call(new typedArrayConstructors[i](10));
+ }
+ }
+
+ CheckProperty("buffer");
+ CheckProperty("byteOffset");
+ CheckProperty("byteLength");
+ CheckProperty("length");
+}
+
+for (var i = 0; i < typedArrayConstructors.length; i++) {
+ TestPropertyTypeChecks(typedArrayConstructors[i]);
+}
+
+
+function TestTypedArraySet() {
+ // Test array.set in different combinations.
+
+ function assertArrayPrefix(expected, array) {
+ for (var i = 0; i < expected.length; ++i) {
+ assertEquals(expected[i], array[i]);
+ }
+ }
+
+ var a11 = new Int16Array([1, 2, 3, 4, 0, -1])
+ var a12 = new Uint16Array(15)
+ a12.set(a11, 3)
+ assertArrayPrefix([0, 0, 0, 1, 2, 3, 4, 0, 0xffff, 0, 0], a12)
+ assertThrows(function(){ a11.set(a12) })
+
+ var a21 = [1, undefined, 10, NaN, 0, -1, {valueOf: function() {return 3}}]
+ var a22 = new Int32Array(12)
+ a22.set(a21, 2)
+ assertArrayPrefix([0, 0, 1, 0, 10, 0, 0, -1, 3, 0], a22)
+
+ var a31 = new Float32Array([2, 4, 6, 8, 11, NaN, 1/0, -3])
+ var a32 = a31.subarray(2, 6)
+ a31.set(a32, 4)
+ assertArrayPrefix([2, 4, 6, 8, 6, 8, 11, NaN], a31)
+ assertArrayPrefix([6, 8, 6, 8], a32)
+
+ var a4 = new Uint8ClampedArray([3,2,5,6])
+ a4.set(a4)
+ assertArrayPrefix([3, 2, 5, 6], a4)
+
+ // Cases with overlapping backing store but different element sizes.
+ var b = new ArrayBuffer(4)
+ var a5 = new Int16Array(b)
+ var a50 = new Int8Array(b)
+ var a51 = new Int8Array(b, 0, 2)
+ var a52 = new Int8Array(b, 1, 2)
+ var a53 = new Int8Array(b, 2, 2)
+
+ a5.set([0x5050, 0x0a0a])
+ assertArrayPrefix([0x50, 0x50, 0x0a, 0x0a], a50)
+ assertArrayPrefix([0x50, 0x50], a51)
+ assertArrayPrefix([0x50, 0x0a], a52)
+ assertArrayPrefix([0x0a, 0x0a], a53)
+
+ a50.set([0x50, 0x50, 0x0a, 0x0a])
+ a51.set(a5)
+ assertArrayPrefix([0x50, 0x0a, 0x0a, 0x0a], a50)
+
+ a50.set([0x50, 0x50, 0x0a, 0x0a])
+ a52.set(a5)
+ assertArrayPrefix([0x50, 0x50, 0x0a, 0x0a], a50)
+
+ a50.set([0x50, 0x50, 0x0a, 0x0a])
+ a53.set(a5)
+ assertArrayPrefix([0x50, 0x50, 0x50, 0x0a], a50)
+
+ a50.set([0x50, 0x51, 0x0a, 0x0b])
+ a5.set(a51)
+ assertArrayPrefix([0x0050, 0x0051], a5)
+
+ a50.set([0x50, 0x51, 0x0a, 0x0b])
+ a5.set(a52)
+ assertArrayPrefix([0x0051, 0x000a], a5)
+
+ a50.set([0x50, 0x51, 0x0a, 0x0b])
+ a5.set(a53)
+ assertArrayPrefix([0x000a, 0x000b], a5)
+
+ // Mixed types of same size.
+ var a61 = new Float32Array([1.2, 12.3])
+ var a62 = new Int32Array(2)
+ a62.set(a61)
+ assertArrayPrefix([1, 12], a62)
+ a61.set(a62)
+ assertArrayPrefix([1, 12], a61)
+
+ // Invalid source
+ var a = new Uint16Array(50);
+ assertThrows(function() { a.set(0) }, TypeError);
+ assertThrows(function() { a.set({}) }, TypeError);
+ assertThrows(function() { a.set.call({}) }, TypeError);
+ assertThrows(function() { a.set.call([]) }, TypeError);
+}
+
+TestTypedArraySet();
// General tests for properties
@@ -232,8 +472,9 @@
assertArrayEquals([], props(obj));
}
TestEnumerable(ArrayBuffer, new ArrayBuffer());
-TestEnumerable(Uint8Array);
-
+for(i = 0; i < typedArrayConstructors.length; i++) {
+ TestEnumerable(typedArrayConstructors[i]);
+}
// Test arbitrary properties on ArrayBuffer
function TestArbitrary(m) {
@@ -247,6 +488,11 @@
}
}
TestArbitrary(new ArrayBuffer(256));
+for(i = 0; i < typedArrayConstructors.length; i++) {
+ TestArbitrary(new typedArrayConstructors[i](10));
+}
+
+
// Test direct constructor call
assertTrue(ArrayBuffer() instanceof ArrayBuffer);
diff --git a/test/mjsunit/lea-add.js b/test/mjsunit/lea-add.js
new file mode 100644
index 0000000..28a1494
--- /dev/null
+++ b/test/mjsunit/lea-add.js
@@ -0,0 +1,84 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function a() {
+ var sum = 0;
+ for (var i = 0; i < 500; ++i) {
+ sum = (i + sum) | 0;
+ }
+ return sum;
+}
+
+function b() {
+ var sum = 0;
+ for (var i = -500; i < 0; ++i) {
+ sum = (i + sum) | 0;
+ }
+ return sum;
+}
+
+function c() {
+ var sum = 0;
+ for (var i = 0; i < 500; ++i) {
+ sum += (i + -0x7fffffff) | 0;
+ }
+ return sum;
+}
+
+function d() {
+ var sum = 0;
+ for (var i = -501; i < 0; ++i) {
+ sum += (i + 501) | 0;
+ }
+ return sum;
+}
+
+a();
+a();
+%OptimizeFunctionOnNextCall(a);
+assertEquals(124750, a());
+assertEquals(124750, a());
+
+b();
+b();
+%OptimizeFunctionOnNextCall(b);
+assertEquals(-125250, b());
+assertEquals(-125250, b());
+
+c();
+c();
+%OptimizeFunctionOnNextCall(c);
+assertEquals(-1073741698750, c());
+assertEquals(-1073741698750, c());
+
+d();
+d();
+%OptimizeFunctionOnNextCall(d);
+assertEquals(125250, d());
+assertEquals(125250, d());
diff --git a/test/mjsunit/regress/regress-2671-1.js b/test/mjsunit/regress/regress-2671-1.js
new file mode 100644
index 0000000..042a501
--- /dev/null
+++ b/test/mjsunit/regress/regress-2671-1.js
@@ -0,0 +1,45 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var y;
+function f() {
+ var a = [];
+ a[20] = 0;
+ y = 3;
+ var i = 7 * (y + -0);
+ a[i]++;
+ assertTrue(isNaN(a[i]));
+}
+
+f();
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
+
diff --git a/test/mjsunit/regress/regress-2671.js b/test/mjsunit/regress/regress-2671.js
new file mode 100644
index 0000000..8da1b8f
--- /dev/null
+++ b/test/mjsunit/regress/regress-2671.js
@@ -0,0 +1,45 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var y;
+function f() {
+ var a = [];
+ a[20] = 0;
+ y = 3;
+ var i = 7 * (y + -0);
+ a[i] = 1/y;
+ assertFalse(isNaN(a[i]));
+}
+
+f();
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
+
diff --git a/test/mjsunit/track-fields.js b/test/mjsunit/track-fields.js
index 3176a5a..bcf37ae 100644
--- a/test/mjsunit/track-fields.js
+++ b/test/mjsunit/track-fields.js
@@ -99,3 +99,165 @@
// Smi, double, object.
o6.c = {};
assertTrue(%HaveSameMap(o6, o7));
+
+function poly_load(o, b) {
+ var v = o.field;
+ if (b) {
+ return v + 10;
+ }
+ return o;
+}
+
+var of1 = {a:0};
+of1.field = {};
+var of2 = {b:0};
+of2.field = 10;
+
+poly_load(of1, false);
+poly_load(of1, false);
+poly_load(of2, true);
+%OptimizeFunctionOnNextCall(poly_load);
+assertEquals("[object Object]10", poly_load(of1, true));
+
+// Ensure small object literals with doubles do not share double storage.
+function object_literal() { return {"a":1.5}; }
+var o8 = object_literal();
+var o9 = object_literal();
+o8.a = 4.6
+assertEquals(1.5, o9.a);
+
+// Ensure double storage is not leaked in the case of polymorphic loads.
+function load_poly(o) {
+ return o.a;
+}
+
+var o10 = { "a": 1.6 };
+var o11 = { "b": 1, "a": 1.7 };
+load_poly(o10);
+load_poly(o10);
+load_poly(o11);
+%OptimizeFunctionOnNextCall(load_poly);
+var val = load_poly(o10);
+o10.a = 19.5;
+assertFalse(o10.a == val);
+
+// Ensure polymorphic loads only go monomorphic when the representations are
+// compatible.
+
+// Check polymorphic load from double + object fields.
+function load_mono(o) {
+ return o.a1;
+}
+
+var object = {"x": 1};
+var o10 = { "a1": 1.6 };
+var o11 = { "a1": object, "b": 1 };
+load_mono(o10);
+load_mono(o10);
+load_mono(o11);
+%OptimizeFunctionOnNextCall(load_mono);
+assertEquals(object, load_mono(o11));
+
+// Check polymorphic load from smi + object fields.
+function load_mono2(o) {
+ return o.a2;
+}
+
+var o12 = { "a2": 5 };
+var o13 = { "a2": object, "b": 1 };
+load_mono2(o12);
+load_mono2(o12);
+load_mono2(o13);
+%OptimizeFunctionOnNextCall(load_mono2);
+assertEquals(object, load_mono2(o13));
+
+// Check polymorphic load from double + double fields.
+function load_mono3(o) {
+ return o.a3;
+}
+
+var o14 = { "a3": 1.6 };
+var o15 = { "a3": 1.8, "b": 1 };
+load_mono3(o14);
+load_mono3(o14);
+load_mono3(o15);
+%OptimizeFunctionOnNextCall(load_mono3);
+assertEquals(1.6, load_mono3(o14));
+assertEquals(1.8, load_mono3(o15));
+
+// Check that JSON parsing respects existing representations.
+var o16 = JSON.parse('{"a":1.5}');
+var o17 = JSON.parse('{"a":100}');
+assertTrue(%HaveSameMap(o16, o17));
+var o17_a = o17.a;
+assertEquals(100, o17_a);
+o17.a = 200;
+assertEquals(100, o17_a);
+assertEquals(200, o17.a);
+
+// Ensure normalizing results in ignored representations.
+var o18 = {};
+o18.field1 = 100;
+o18.field2 = 1;
+o18.to_delete = 100;
+
+var o19 = {};
+o19.field1 = 100;
+o19.field2 = 1.6;
+o19.to_delete = 100;
+
+assertFalse(%HaveSameMap(o18, o19));
+
+delete o18.to_delete;
+delete o19.to_delete;
+
+assertTrue(%HaveSameMap(o18, o19));
+assertEquals(1, o18.field2);
+assertEquals(1.6, o19.field2);
+
+// Test megamorphic keyed stub behaviour in combination with representations.
+var some_object20 = {"a":1};
+var o20 = {};
+o20.smi = 1;
+o20.dbl = 1.5;
+o20.obj = some_object20;
+
+function keyed_load(o, k) {
+ return o[k];
+}
+
+function keyed_store(o, k, v) {
+ return o[k] = v;
+}
+
+var smi20 = keyed_load(o20, "smi");
+var dbl20 = keyed_load(o20, "dbl");
+var obj20 = keyed_load(o20, "obj");
+keyed_load(o20, "smi");
+keyed_load(o20, "dbl");
+keyed_load(o20, "obj");
+keyed_load(o20, "smi");
+keyed_load(o20, "dbl");
+keyed_load(o20, "obj");
+
+assertEquals(1, smi20);
+assertEquals(1.5, dbl20);
+assertEquals(some_object20, obj20);
+
+keyed_store(o20, "smi", 100);
+keyed_store(o20, "dbl", 100);
+keyed_store(o20, "obj", 100);
+keyed_store(o20, "smi", 100);
+keyed_store(o20, "dbl", 100);
+keyed_store(o20, "obj", 100);
+keyed_store(o20, "smi", 100);
+keyed_store(o20, "dbl", 100);
+keyed_store(o20, "obj", 100);
+
+assertEquals(1, smi20);
+assertEquals(1.5, dbl20);
+assertEquals(some_object20, obj20);
+
+assertEquals(100, o20.smi);
+assertEquals(100, o20.dbl);
+assertEquals(100, o20.obj);
diff --git a/tools/grokdump.py b/tools/grokdump.py
index f3ae8a2..1be5cb8 100755
--- a/tools/grokdump.py
+++ b/tools/grokdump.py
@@ -2036,17 +2036,24 @@
def do_u(self, args):
"""
- u 0x<address> 0x<size>
- Unassemble memory in the region [address, address + size)
+ Unassemble memory in the region [address, address + size). If the
+ size is not specified, a default value of 32 bytes is used.
+ Synopsis: u 0x<address> 0x<size>
"""
args = args.split(' ')
start = int(args[0], 16)
- size = int(args[1], 16)
+ size = int(args[1], 16) if len(args) > 1 else 0x20
+ if not self.reader.IsValidAddress(start):
+ print "Address is not contained within the minidump!"
+ return
lines = self.reader.GetDisasmLines(start, size)
for line in lines:
print FormatDisasmLine(start, self.heap, line)
print
+ def do_EOF(self, none):
+ raise KeyboardInterrupt
+
EIP_PROXIMITY = 64
CONTEXT_FOR_ARCH = {
@@ -2131,7 +2138,10 @@
FullDump(reader, heap)
if options.shell:
- InspectionShell(reader, heap).cmdloop("type help to get help")
+ try:
+ InspectionShell(reader, heap).cmdloop("type help to get help")
+ except KeyboardInterrupt:
+ print "Kthxbye."
else:
if reader.exception is not None:
print "Annotated stack (from exception.esp to bottom):"
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 7070d97..fc6296a 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -800,6 +800,7 @@
'../../src/proxy.js',
'../../src/collection.js',
'../../src/object-observe.js',
+ '../../src/arraybuffer.js',
'../../src/typedarray.js',
'../../src/generator.js'
],