Update V8 to r6333 (2.5 branch)
Note: we temporarily deviate from 9.0.597.x to pull v8 v2.5.9.9
Change-Id: I08d06e15a1a3af206fb23f98f56c81fe579e1a78
http://v8.googlecode.com/svn/branches/2.5@6333
diff --git a/V8_MERGE_REVISION b/V8_MERGE_REVISION
index b948d81..b7133be 100644
--- a/V8_MERGE_REVISION
+++ b/V8_MERGE_REVISION
@@ -1,4 +1,5 @@
We use a V8 revision that has been used for a Chromium release.
-http://src.chromium.org/svn/releases/9.0.597.55/DEPS
-http://v8.googlecode.com/svn/branches/2.5@6122
+Note: we temporarily deviate from 9.0.597.x to pull v8 v2.5.9.9
+
+http://v8.googlecode.com/svn/branches/2.5@6333
diff --git a/include/v8.h b/include/v8.h
index 8ecf63a..2684cd3 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -3300,10 +3300,10 @@
const int kSmiTagSize = 1;
const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
-template <size_t ptr_size> struct SmiConstants;
+template <size_t ptr_size> struct SmiTagging;
// Smi constants for 32-bit systems.
-template <> struct SmiConstants<4> {
+template <> struct SmiTagging<4> {
static const int kSmiShiftSize = 0;
static const int kSmiValueSize = 31;
static inline int SmiToInt(internal::Object* value) {
@@ -3311,10 +3311,15 @@
// Throw away top 32 bits and shift down (requires >> to be sign extending).
return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> shift_bits;
}
+
+ // For 32-bit systems any 2-byte-aligned pointer can be encoded as a smi
+ // with a plain reinterpret_cast.
+ static const intptr_t kEncodablePointerMask = 0x1;
+ static const int kPointerToSmiShift = 0;
};
// Smi constants for 64-bit systems.
-template <> struct SmiConstants<8> {
+template <> struct SmiTagging<8> {
static const int kSmiShiftSize = 31;
static const int kSmiValueSize = 32;
static inline int SmiToInt(internal::Object* value) {
@@ -3322,10 +3327,26 @@
// Shift down and throw away top 32 bits.
return static_cast<int>(reinterpret_cast<intptr_t>(value) >> shift_bits);
}
+
+ // To maximize the range of pointers that can be encoded
+ // in the available 32 bits, we require them to be 8-byte aligned.
+ // This gives 2 ^ (32 + 3) = 32G address space covered.
+ // It might not be enough to cover stack-allocated objects on some platforms.
+ static const int kPointerAlignment = 3;
+
+ static const intptr_t kEncodablePointerMask =
+ ~(intptr_t(0xffffffff) << kPointerAlignment);
+
+ static const int kPointerToSmiShift =
+ kSmiTagSize + kSmiShiftSize - kPointerAlignment;
};
-const int kSmiShiftSize = SmiConstants<kApiPointerSize>::kSmiShiftSize;
-const int kSmiValueSize = SmiConstants<kApiPointerSize>::kSmiValueSize;
+typedef SmiTagging<kApiPointerSize> PlatformSmiTagging;
+const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize;
+const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize;
+const intptr_t kEncodablePointerMask =
+ PlatformSmiTagging::kEncodablePointerMask;
+const int kPointerToSmiShift = PlatformSmiTagging::kPointerToSmiShift;
template <size_t ptr_size> struct InternalConstants;
@@ -3373,7 +3394,7 @@
}
static inline int SmiValue(internal::Object* value) {
- return SmiConstants<kApiPointerSize>::SmiToInt(value);
+ return PlatformSmiTagging::SmiToInt(value);
}
static inline int GetInstanceType(internal::Object* obj) {
@@ -3382,9 +3403,14 @@
return ReadField<uint8_t>(map, kMapInstanceTypeOffset);
}
+ static inline void* GetExternalPointerFromSmi(internal::Object* value) {
+ const intptr_t address = reinterpret_cast<intptr_t>(value);
+ return reinterpret_cast<void*>(address >> kPointerToSmiShift);
+ }
+
static inline void* GetExternalPointer(internal::Object* obj) {
if (HasSmiTag(obj)) {
- return obj;
+ return GetExternalPointerFromSmi(obj);
} else if (GetInstanceType(obj) == kProxyType) {
return ReadField<void*>(obj, kProxyProxyOffset);
} else {
diff --git a/src/api.cc b/src/api.cc
index 19af866..b1fb88a 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -3243,18 +3243,35 @@
}
+static bool CanBeEncodedAsSmi(void* ptr) {
+ const intptr_t address = reinterpret_cast<intptr_t>(ptr);
+ return ((address & i::kEncodablePointerMask) == 0);
+}
+
+
+static i::Smi* EncodeAsSmi(void* ptr) {
+ ASSERT(CanBeEncodedAsSmi(ptr));
+ const intptr_t address = reinterpret_cast<intptr_t>(ptr);
+ i::Smi* result = reinterpret_cast<i::Smi*>(address << i::kPointerToSmiShift);
+ ASSERT(i::Internals::HasSmiTag(result));
+ ASSERT_EQ(result, i::Smi::FromInt(result->value()));
+ ASSERT_EQ(ptr, i::Internals::GetExternalPointerFromSmi(result));
+ return result;
+}
+
+
void v8::Object::SetPointerInInternalField(int index, void* value) {
ENTER_V8;
- i::Object* as_object = reinterpret_cast<i::Object*>(value);
- if (as_object->IsSmi()) {
- Utils::OpenHandle(this)->SetInternalField(index, as_object);
- return;
+ if (CanBeEncodedAsSmi(value)) {
+ Utils::OpenHandle(this)->SetInternalField(index, EncodeAsSmi(value));
+ } else {
+ HandleScope scope;
+ i::Handle<i::Proxy> proxy =
+ i::Factory::NewProxy(reinterpret_cast<i::Address>(value), i::TENURED);
+ if (!proxy.is_null())
+ Utils::OpenHandle(this)->SetInternalField(index, *proxy);
}
- HandleScope scope;
- i::Handle<i::Proxy> proxy =
- i::Factory::NewProxy(reinterpret_cast<i::Address>(value), i::TENURED);
- if (!proxy.is_null())
- Utils::OpenHandle(this)->SetInternalField(index, *proxy);
+ ASSERT_EQ(value, GetPointerFromInternalField(index));
}
@@ -3537,11 +3554,13 @@
LOG_API("External::Wrap");
EnsureInitialized("v8::External::Wrap()");
ENTER_V8;
- i::Object* as_object = reinterpret_cast<i::Object*>(data);
- if (as_object->IsSmi()) {
- return Utils::ToLocal(i::Handle<i::Object>(as_object));
- }
- return ExternalNewImpl(data);
+
+ v8::Local<v8::Value> result = CanBeEncodedAsSmi(data)
+ ? Utils::ToLocal(i::Handle<i::Object>(EncodeAsSmi(data)))
+ : v8::Local<v8::Value>(ExternalNewImpl(data));
+
+ ASSERT_EQ(data, Unwrap(result));
+ return result;
}
@@ -3549,7 +3568,7 @@
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
i::Object* value = obj->GetInternalField(index);
if (value->IsSmi()) {
- return value;
+ return i::Internals::GetExternalPointerFromSmi(value);
} else if (value->IsProxy()) {
return reinterpret_cast<void*>(i::Proxy::cast(value)->proxy());
} else {
@@ -3563,8 +3582,7 @@
i::Handle<i::Object> obj = Utils::OpenHandle(*wrapper);
void* result;
if (obj->IsSmi()) {
- // The external value was an aligned pointer.
- result = *obj;
+ result = i::Internals::GetExternalPointerFromSmi(*obj);
} else if (obj->IsProxy()) {
result = ExternalValueImpl(obj);
} else {
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 0a5eac2..2117ce6 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -1923,7 +1923,7 @@
__ cmp(r7, Operand(HeapNumber::kMantissaBits));
// If greater or equal, the argument is already round and in r0.
__ b(&restore_fpscr_and_return, ge);
- __ b(&slow);
+ __ b(&wont_fit_smi);
__ bind(&no_vfp_exception);
// Move the result back to general purpose register r0.
@@ -1951,10 +1951,10 @@
__ Ret();
__ bind(&wont_fit_smi);
- __ bind(&slow);
// Restore FPCSR and fall to slow case.
__ vmsr(r3);
+ __ bind(&slow);
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ InvokeFunction(function, arguments(), JUMP_FUNCTION);
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index cb8e919..cc7cbe5 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -133,9 +133,7 @@
}
bool OS::ArmCpuHasFeature(CpuFeature feature) {
- const int max_items = 2;
- const char* search_strings[max_items] = { NULL, NULL };
- int search_items = 0;
+ const char* search_string = NULL;
// Simple detection of VFP at runtime for Linux.
// It is based on /proc/cpuinfo, which reveals hardware configuration
// to user-space applications. According to ARM (mid 2009), no similar
@@ -143,25 +141,26 @@
// so it's up to individual OSes to provide such.
switch (feature) {
case VFP3:
- search_strings[0] = "vfpv3";
- // Some old kernels will report vfp for A8, not vfpv3, so we check for
- // A8 explicitely. The cpuinfo file report the CPU Part which for Cortex
- // A8 is 0xc08.
- search_strings[1] = "0xc08";
- search_items = 2;
- ASSERT(search_items <= max_items);
+ search_string = "vfpv3";
break;
case ARMv7:
- search_strings[0] = "ARMv7" ;
- search_items = 1;
- ASSERT(search_items <= max_items);
+ search_string = "ARMv7";
break;
default:
UNREACHABLE();
}
- for (int i = 0; i < search_items; ++i) {
- if (CPUInfoContainsString(search_strings[i])) {
+ if (CPUInfoContainsString(search_string)) {
+ return true;
+ }
+
+ if (feature == VFP3) {
+ // Some old kernels will report vfp, not vfpv3. Here we make a last attempt
+ // to detect vfpv3 by checking for vfp *and* neon, since neon is only
+ // available on architectures with vfpv3.
+ // Checking neon on its own is not enough as it is possible to have neon
+ // without vfp.
+ if (CPUInfoContainsString("vfp") && CPUInfoContainsString("neon")) {
return true;
}
}
diff --git a/src/version.cc b/src/version.cc
index d9be907..7b552ee 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -35,7 +35,7 @@
#define MAJOR_VERSION 2
#define MINOR_VERSION 5
#define BUILD_NUMBER 9
-#define PATCH_LEVEL 6
+#define PATCH_LEVEL 9
#define CANDIDATE_VERSION false
// Define SONAME to have the SCons build the put a specific SONAME into the
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 8ce7a79..8a6a677 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -814,6 +814,75 @@
}
+static void* expected_ptr;
+static v8::Handle<v8::Value> callback(const v8::Arguments& args) {
+ void* ptr = v8::External::Unwrap(args.Data());
+ CHECK_EQ(expected_ptr, ptr);
+ return v8::Boolean::New(true);
+}
+
+
+static void TestExternalPointerWrapping() {
+ v8::HandleScope scope;
+ LocalContext env;
+
+ v8::Handle<v8::Value> data = v8::External::Wrap(expected_ptr);
+
+ v8::Handle<v8::Object> obj = v8::Object::New();
+ obj->Set(v8_str("func"),
+ v8::FunctionTemplate::New(callback, data)->GetFunction());
+ env->Global()->Set(v8_str("obj"), obj);
+
+ CHECK(CompileRun(
+ "function foo() {\n"
+ " for (var i = 0; i < 13; i++) obj.func();\n"
+ "}\n"
+ "foo(), true")->BooleanValue());
+}
+
+
+THREADED_TEST(ExternalWrap) {
+ // Check heap allocated object.
+ int* ptr = new int;
+ expected_ptr = ptr;
+ TestExternalPointerWrapping();
+ delete ptr;
+
+ // Check stack allocated object.
+ int foo;
+ expected_ptr = &foo;
+ TestExternalPointerWrapping();
+
+ // Check not aligned addresses.
+ const int n = 100;
+ char* s = new char[n];
+ for (int i = 0; i < n; i++) {
+ expected_ptr = s + i;
+ TestExternalPointerWrapping();
+ }
+
+ delete[] s;
+
+ // Check several invalid addresses.
+ expected_ptr = reinterpret_cast<void*>(1);
+ TestExternalPointerWrapping();
+
+ expected_ptr = reinterpret_cast<void*>(0xdeadbeef);
+ TestExternalPointerWrapping();
+
+ expected_ptr = reinterpret_cast<void*>(0xdeadbeef + 1);
+ TestExternalPointerWrapping();
+
+#if defined(V8_HOST_ARCH_X64)
+ expected_ptr = reinterpret_cast<void*>(0xdeadbeefdeadbeef);
+ TestExternalPointerWrapping();
+
+ expected_ptr = reinterpret_cast<void*>(0xdeadbeefdeadbeef + 1);
+ TestExternalPointerWrapping();
+#endif
+}
+
+
THREADED_TEST(FindInstanceInPrototypeChain) {
v8::HandleScope scope;
LocalContext env;