Version 3.14.5
Killed off the SCons based build.
Added a faster API for creating v8::Integer objects.
Speeded up function deoptimization by avoiding quadratic pass over optimized function list. (Chromium issue 155270)
Always invoke the default Array.sort functions from builtin functions. (issue 2372)
Reverted recent CPU profiler changes because they broke --prof. (issue 2364)
Switched code flushing to use different JSFunction field. (issue 1609)
Performance and stability improvements on all platforms.
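
A minimal usage sketch of the faster Integer API (illustrative only, not part
of this patch; it assumes an entered isolate with an active HandleScope). The
Isolate* overloads declared in include/v8.h below let callers skip the
current-isolate lookup performed by the older overloads:

    #include <v8.h>

    v8::Local<v8::Integer> MakeIntegers(v8::Isolate* isolate) {
      // New in this version: overloads that take the Isolate* explicitly.
      v8::Local<v8::Integer> small = v8::Integer::New(42, isolate);
      v8::Local<v8::Integer> big =
          v8::Integer::NewFromUnsigned(0xFFFFFFFFu, isolate);
      return small->Value() < big->Value() ? big : small;
    }
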
git-svn-id: http://v8.googlecode.com/svn/trunk@12787 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/.gitignore b/.gitignore
index c39f642..0bf9313 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,8 +25,8 @@
/build/Debug
/build/gyp
/build/Release
-/obj/
-/out/
+/obj
+/out
/test/cctest/cctest.status2
/test/es5conform/data
/test/message/message.status2
@@ -47,6 +47,6 @@
/tools/oom_dump/oom_dump.o
/tools/visual_studio/Debug
/tools/visual_studio/Release
-/xcodebuild/
+/xcodebuild
TAGS
*.Makefile
diff --git a/ChangeLog b/ChangeLog
index c193154..7c435c8 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,24 @@
+2012-10-22: Version 3.14.5
+
+ Killed off the SCons based build.
+
+ Added a faster API for creating v8::Integer objects.
+
+ Speeded up function deoptimization by avoiding quadratic pass over
+ optimized function list. (Chromium issue 155270)
+
+ Always invoke the default Array.sort functions from builtin functions.
+ (issue 2372)
+
+ Reverted recent CPU profiler changes because they broke --prof.
+ (issue 2364)
+
+ Switched code flushing to use different JSFunction field.
+ (issue 1609)
+
+ Performance and stability improvements on all platforms.
+
+
2012-10-15: Version 3.14.4
Allow evals for debugger even if they are prohibited in the debuggee
diff --git a/Makefile b/Makefile
index 99189d6..b65ea4c 100644
--- a/Makefile
+++ b/Makefile
@@ -30,7 +30,7 @@
CXX ?= g++
LINK ?= g++
OUTDIR ?= out
-TESTJOBS ?= -j16
+TESTJOBS ?=
GYPFLAGS ?=
TESTFLAGS ?=
ANDROID_NDK_ROOT ?=
@@ -83,9 +83,9 @@
endif
# vfp3=off
ifeq ($(vfp3), off)
- GYPFLAGS += -Dv8_can_use_vfp_instructions=false
+ GYPFLAGS += -Dv8_can_use_vfp3_instructions=false
else
- GYPFLAGS += -Dv8_can_use_vfp_instructions=true
+ GYPFLAGS += -Dv8_can_use_vfp3_instructions=true
endif
# debuggersupport=off
ifeq ($(debuggersupport), off)
@@ -203,20 +203,20 @@
# Test targets.
check: all
- @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
+ @tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch=$(shell echo $(DEFAULT_ARCHES) | sed -e 's/ /,/g') \
$(TESTFLAGS)
$(addsuffix .check,$(MODES)): $$(basename $$@)
- @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
+ @tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
--mode=$(basename $@) $(TESTFLAGS)
$(addsuffix .check,$(ARCHES)): $$(basename $$@)
- @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
+ @tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch=$(basename $@) $(TESTFLAGS)
$(CHECKS): $$(basename $$@)
- @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
+ @tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(basename $@) $(TESTFLAGS)
$(addsuffix .sync, $(ANDROID_BUILDS)): $$(basename $$@)
@@ -224,16 +224,16 @@
$(shell pwd) $(ANDROID_V8)
$(addsuffix .check, $(ANDROID_BUILDS)): $$(basename $$@).sync
- @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
+ @tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(basename $@) \
--timeout=600 \
- --special-command="tools/android-run.py @"
+ --command-prefix="tools/android-run.py"
$(addsuffix .check, $(ANDROID_ARCHES)): \
$(addprefix $$(basename $$@).,$(MODES)).check
native.check: native
- @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR)/native \
+ @tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR)/native \
--arch-and-mode=. $(TESTFLAGS)
# Clean targets. You can clean each architecture individually, or everything.
diff --git a/SConstruct b/SConstruct
index e178a91..5f8616a 100644
--- a/SConstruct
+++ b/SConstruct
@@ -1157,6 +1157,11 @@
'default': 'on',
'help': 'use fpu instructions when building the snapshot [MIPS only]'
},
+ 'I_know_I_should_build_with_GYP': {
+ 'values': ['yes', 'no'],
+ 'default': 'no',
+ 'help': 'grace period: temporarily override SCons deprecation'
+ }
}
@@ -1257,7 +1262,35 @@
return True
+def WarnAboutDeprecation():
+ print """
+ #####################################################################
+ # #
+ # LAST WARNING: Building V8 with SCons is deprecated. #
+ # #
+ # This only works because you have overridden the kill switch. #
+ # #
+ # MIGRATE TO THE GYP-BASED BUILD NOW! #
+ # #
+ # Instructions: http://code.google.com/p/v8/wiki/BuildingWithGYP. #
+ # #
+ #####################################################################
+ """
+
+
def VerifyOptions(env):
+ if env['I_know_I_should_build_with_GYP'] != 'yes':
+ Abort("Building V8 with SCons is no longer supported. Please use GYP "
+ "instead; you can find instructions are at "
+ "http://code.google.com/p/v8/wiki/BuildingWithGYP.\n\n"
+ "Quitting.\n\n"
+ "For a limited grace period, you can specify "
+ "\"I_know_I_should_build_with_GYP=yes\" to override.")
+ else:
+ WarnAboutDeprecation()
+ import atexit
+ atexit.register(WarnAboutDeprecation)
+
if not IsLegal(env, 'mode', ['debug', 'release']):
return False
if not IsLegal(env, 'sample', ["shell", "process", "lineprocessor"]):
@@ -1600,18 +1633,4 @@
except:
pass
-
-def WarnAboutDeprecation():
- print """
-#######################################################
-# WARNING: Building V8 with SCons is deprecated and #
-# will not work much longer. Please switch to using #
-# the GYP-based build now. Instructions are at #
-# http://code.google.com/p/v8/wiki/BuildingWithGYP. #
-#######################################################
- """
-
-WarnAboutDeprecation()
-import atexit
-atexit.register(WarnAboutDeprecation)
Build()
diff --git a/build/common.gypi b/build/common.gypi
index dc5456b..78888b8 100644
--- a/build/common.gypi
+++ b/build/common.gypi
@@ -357,7 +357,6 @@
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
- 'defines': ['ENABLE_GDB_JIT_INTERFACE',],
}],
['OS=="android"', {
'variables': {
diff --git a/include/v8.h b/include/v8.h
index ff44fdf..245dc5a 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -390,7 +390,7 @@
*/
inline void MakeWeak(void* parameters, WeakReferenceCallback callback);
- /** Clears the weak reference to this object.*/
+ /** Clears the weak reference to this object. */
inline void ClearWeak();
/**
@@ -402,14 +402,13 @@
*/
inline void MarkIndependent();
- /**
- *Checks if the handle holds the only reference to an object.
- */
+ /** Returns true if this handle was previously marked as independent. */
+ inline bool IsIndependent() const;
+
+ /** Checks if the handle holds the only reference to an object. */
inline bool IsNearDeath() const;
- /**
- * Returns true if the handle's reference is weak.
- */
+ /** Returns true if the handle's reference is weak. */
inline bool IsWeak() const;
/**
@@ -418,6 +417,12 @@
*/
inline void SetWrapperClassId(uint16_t class_id);
+ /**
+ * Returns the class ID previously assigned to this handle or 0 if no class
+ * ID was previously assigned.
+ */
+ inline uint16_t WrapperClassId() const;
+
private:
friend class ImplementationUtilities;
friend class ObjectTemplate;
@@ -1384,6 +1389,8 @@
public:
V8EXPORT static Local<Integer> New(int32_t value);
V8EXPORT static Local<Integer> NewFromUnsigned(uint32_t value);
+ V8EXPORT static Local<Integer> New(int32_t value, Isolate*);
+ V8EXPORT static Local<Integer> NewFromUnsigned(uint32_t value, Isolate*);
V8EXPORT int64_t Value() const;
static inline Integer* Cast(v8::Value* obj);
private:
@@ -2653,7 +2660,7 @@
typedef void (*FatalErrorCallback)(const char* location, const char* message);
-typedef void (*MessageCallback)(Handle<Message> message, Handle<Value> data);
+typedef void (*MessageCallback)(Handle<Message> message, Handle<Value> error);
/**
@@ -3011,7 +3018,7 @@
/**
- * Interface for iterating though all external resources in the heap.
+ * Interface for iterating through all external resources in the heap.
*/
class V8EXPORT ExternalResourceVisitor { // NOLINT
public:
@@ -3021,6 +3028,17 @@
/**
+ * Interface for iterating through all the persistent handles in the heap.
+ */
+class V8EXPORT PersistentHandleVisitor { // NOLINT
+ public:
+ virtual ~PersistentHandleVisitor() {}
+ virtual void VisitPersistentHandle(Persistent<Value> value,
+ uint16_t class_id) {}
+};
+
+
+/**
* Container class for static utility functions.
*/
class V8EXPORT V8 {
@@ -3085,8 +3103,7 @@
* The same message listener can be added more than once and in that
* case it will be called more than once for each message.
*/
- static bool AddMessageListener(MessageCallback that,
- Handle<Value> data = Handle<Value>());
+ static bool AddMessageListener(MessageCallback that);
/**
* Remove all message listeners from the specified callback function.
@@ -3428,6 +3445,12 @@
static void VisitExternalResources(ExternalResourceVisitor* visitor);
/**
+ * Iterates through all the persistent handles in the current isolate's heap
+ * that have class_ids.
+ */
+ static void VisitHandlesWithClassIds(PersistentHandleVisitor* visitor);
+
+ /**
* Optional notification that the embedder is idle.
* V8 uses the notification to reduce memory footprint.
* This call can be used repeatedly if the embedder remains idle.
@@ -3465,10 +3488,12 @@
WeakReferenceCallback);
static void ClearWeak(internal::Object** global_handle);
static void MarkIndependent(internal::Object** global_handle);
+ static bool IsGlobalIndependent(internal::Object** global_handle);
static bool IsGlobalNearDeath(internal::Object** global_handle);
static bool IsGlobalWeak(internal::Object** global_handle);
static void SetWrapperClassId(internal::Object** global_handle,
uint16_t class_id);
+ static uint16_t GetWrapperClassId(internal::Object** global_handle);
template <class T> friend class Handle;
template <class T> friend class Local;
@@ -4186,6 +4211,13 @@
template <class T>
+bool Persistent<T>::IsIndependent() const {
+ if (this->IsEmpty()) return false;
+ return V8::IsGlobalIndependent(reinterpret_cast<internal::Object**>(**this));
+}
+
+
+template <class T>
bool Persistent<T>::IsNearDeath() const {
if (this->IsEmpty()) return false;
return V8::IsGlobalNearDeath(reinterpret_cast<internal::Object**>(**this));
@@ -4231,6 +4263,11 @@
V8::SetWrapperClassId(reinterpret_cast<internal::Object**>(**this), class_id);
}
+template <class T>
+uint16_t Persistent<T>::WrapperClassId() const {
+ return V8::GetWrapperClassId(reinterpret_cast<internal::Object**>(**this));
+}
+
Arguments::Arguments(internal::Object** implicit_args,
internal::Object** values, int length,
bool is_construct_call)
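
A minimal sketch (not part of this patch) of how an embedder might combine the
class-id handle additions declared above: SetWrapperClassId()/WrapperClassId(),
PersistentHandleVisitor, and V8::VisitHandlesWithClassIds(). The visitor class
and the class id value passed in are purely illustrative:

    #include <v8.h>

    class WrapperCounter : public v8::PersistentHandleVisitor {
     public:
      explicit WrapperCounter(uint16_t class_id)
          : class_id_(class_id), count_(0) {}
      virtual void VisitPersistentHandle(v8::Persistent<v8::Value> value,
                                         uint16_t class_id) {
        // Only handles previously tagged via SetWrapperClassId() are visited.
        if (class_id == class_id_) count_++;
      }
      int count() const { return count_; }
     private:
      uint16_t class_id_;
      int count_;
    };

    int CountTaggedWrappers(uint16_t class_id) {
      WrapperCounter counter(class_id);
      v8::V8::VisitHandlesWithClassIds(&counter);
      return counter.count();
    }
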
diff --git a/src/api.cc b/src/api.cc
index dcadf52..e0ad29b 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -648,6 +648,14 @@
}
+bool V8::IsGlobalIndependent(i::Object** obj) {
+ i::Isolate* isolate = i::Isolate::Current();
+ LOG_API(isolate, "IsGlobalIndependent");
+ if (!isolate->IsInitialized()) return false;
+ return i::GlobalHandles::IsIndependent(obj);
+}
+
+
bool V8::IsGlobalNearDeath(i::Object** obj) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "IsGlobalNearDeath");
@@ -4336,6 +4344,30 @@
}
+void v8::V8::VisitHandlesWithClassIds(PersistentHandleVisitor* visitor) {
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::V8::VisitHandlesWithClassId");
+
+ i::AssertNoAllocation no_allocation;
+
+ class VisitorAdapter : public i::ObjectVisitor {
+ public:
+ explicit VisitorAdapter(PersistentHandleVisitor* visitor)
+ : visitor_(visitor) {}
+ virtual void VisitPointers(i::Object** start, i::Object** end) {
+ UNREACHABLE();
+ }
+ virtual void VisitEmbedderReference(i::Object** p, uint16_t class_id) {
+ visitor_->VisitPersistentHandle(ToApi<Value>(i::Handle<i::Object>(p)),
+ class_id);
+ }
+ private:
+ PersistentHandleVisitor* visitor_;
+ } visitor_adapter(visitor);
+ isolate->global_handles()->IterateAllRootsWithClassIds(&visitor_adapter);
+}
+
+
bool v8::V8::IdleNotification(int hint) {
// Returning true tells the caller that it need not
// continue to call IdleNotification.
@@ -4620,6 +4652,11 @@
}
+uint16_t V8::GetWrapperClassId(internal::Object** global_handle) {
+ return i::GlobalHandles::GetWrapperClassId(global_handle);
+}
+
+
Local<v8::Object> ObjectTemplate::NewInstance() {
i::Isolate* isolate = i::Isolate::Current();
ON_BAILOUT(isolate, "v8::ObjectTemplate::NewInstance()",
@@ -5191,24 +5228,39 @@
Local<Integer> v8::Integer::New(int32_t value) {
i::Isolate* isolate = i::Isolate::UncheckedCurrent();
EnsureInitializedForIsolate(isolate, "v8::Integer::New()");
- if (i::Smi::IsValid(value)) {
- return Utils::IntegerToLocal(i::Handle<i::Object>(i::Smi::FromInt(value),
- isolate));
- }
- ENTER_V8(isolate);
- i::Handle<i::Object> result = isolate->factory()->NewNumber(value);
- return Utils::IntegerToLocal(result);
+ return v8::Integer::New(value, reinterpret_cast<Isolate*>(isolate));
}
Local<Integer> Integer::NewFromUnsigned(uint32_t value) {
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::Integer::NewFromUnsigned()");
+ return Integer::NewFromUnsigned(value, reinterpret_cast<Isolate*>(isolate));
+}
+
+
+Local<Integer> v8::Integer::New(int32_t value, Isolate* isolate) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ASSERT(internal_isolate->IsInitialized());
+ if (i::Smi::IsValid(value)) {
+ return Utils::IntegerToLocal(i::Handle<i::Object>(i::Smi::FromInt(value),
+ internal_isolate));
+ }
+ ENTER_V8(internal_isolate);
+ i::Handle<i::Object> result = internal_isolate->factory()->NewNumber(value);
+ return Utils::IntegerToLocal(result);
+}
+
+
+Local<Integer> v8::Integer::NewFromUnsigned(uint32_t value, Isolate* isolate) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ASSERT(internal_isolate->IsInitialized());
bool fits_into_int32_t = (value & (1 << 31)) == 0;
if (fits_into_int32_t) {
- return Integer::New(static_cast<int32_t>(value));
+ return Integer::New(static_cast<int32_t>(value), isolate);
}
- i::Isolate* isolate = i::Isolate::Current();
- ENTER_V8(isolate);
- i::Handle<i::Object> result = isolate->factory()->NewNumber(value);
+ ENTER_V8(internal_isolate);
+ i::Handle<i::Object> result = internal_isolate->factory()->NewNumber(value);
return Utils::IntegerToLocal(result);
}
@@ -5218,19 +5270,14 @@
}
-bool V8::AddMessageListener(MessageCallback that, Handle<Value> data) {
+bool V8::AddMessageListener(MessageCallback that) {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::V8::AddMessageListener()");
ON_BAILOUT(isolate, "v8::V8::AddMessageListener()", return false);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
NeanderArray listeners(isolate->factory()->message_listeners());
- NeanderObject obj(2);
- obj.set(0, *isolate->factory()->NewForeign(FUNCTION_ADDR(that)));
- obj.set(1, data.IsEmpty() ?
- isolate->heap()->undefined_value() :
- *Utils::OpenHandle(*data));
- listeners.add(obj.value());
+ listeners.add(isolate->factory()->NewForeign(FUNCTION_ADDR(that)));
return true;
}
@@ -5245,8 +5292,7 @@
for (int i = 0; i < listeners.length(); i++) {
if (listeners.get(i)->IsUndefined()) continue; // skip deleted ones
- NeanderObject listener(i::JSObject::cast(listeners.get(i)));
- i::Handle<i::Foreign> callback_obj(i::Foreign::cast(listener.get(0)));
+ i::Handle<i::Foreign> callback_obj(i::Foreign::cast(listeners.get(i)));
if (callback_obj->foreign_address() == FUNCTION_ADDR(that)) {
listeners.set(i, isolate->heap()->undefined_value());
}
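
Under the revised listener API above, a message listener is registered without
the per-listener data handle; a minimal sketch with illustrative function
names (the callback's second parameter now carries the error value, per the
MessageCallback typedef change in include/v8.h):

    #include <v8.h>
    #include <stdio.h>

    static void OnMessage(v8::Handle<v8::Message> message,
                          v8::Handle<v8::Value> error) {
      v8::String::Utf8Value text(message->Get());
      printf("uncaught: %s\n", *text ? *text : "<unknown>");
    }

    void InstallMessageListener() {
      v8::V8::AddMessageListener(OnMessage);
    }
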
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index c47c094..6268c33 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -75,7 +75,7 @@
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
- return reinterpret_cast<Address>(Assembler::target_address_address_at(pc_));
+ return reinterpret_cast<Address>(Assembler::target_pointer_address_at(pc_));
}
@@ -86,7 +86,8 @@
void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- Assembler::set_target_address_at(pc_, target);
+ Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(
+ reinterpret_cast<intptr_t>(target) & ~3));
if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -97,25 +98,30 @@
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Memory::Object_at(Assembler::target_address_address_at(pc_));
+ return reinterpret_cast<Object*>(Assembler::target_pointer_at(pc_));
}
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
+ return Handle<Object>(reinterpret_cast<Object**>(
+ Assembler::target_pointer_at(pc_)));
}
Object** RelocInfo::target_object_address() {
+ // Provide a "natural pointer" to the embedded object,
+ // which can be de-referenced during heap iteration.
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object**>(Assembler::target_address_address_at(pc_));
+ reconstructed_obj_ptr_ =
+ reinterpret_cast<Object*>(Assembler::target_pointer_at(pc_));
+ return &reconstructed_obj_ptr_;
}
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+ Assembler::set_target_pointer_at(pc_, reinterpret_cast<Address>(target));
if (mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
@@ -127,7 +133,8 @@
Address* RelocInfo::target_reference_address() {
ASSERT(rmode_ == EXTERNAL_REFERENCE);
- return reinterpret_cast<Address*>(Assembler::target_address_address_at(pc_));
+ reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
+ return &reconstructed_adr_ptr_;
}
@@ -326,7 +333,7 @@
}
-Address Assembler::target_address_address_at(Address pc) {
+Address Assembler::target_pointer_address_at(Address pc) {
Address target_pc = pc;
Instr instr = Memory::int32_at(target_pc);
// If we have a bx instruction, the instruction before the bx is
@@ -356,8 +363,63 @@
}
-Address Assembler::target_address_at(Address pc) {
- return Memory::Address_at(target_address_address_at(pc));
+Address Assembler::target_pointer_at(Address pc) {
+ if (IsMovW(Memory::int32_at(pc))) {
+ ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
+ Instruction* instr = Instruction::At(pc);
+ Instruction* next_instr = Instruction::At(pc + kInstrSize);
+ return reinterpret_cast<Address>(
+ (next_instr->ImmedMovwMovtValue() << 16) |
+ instr->ImmedMovwMovtValue());
+ }
+ return Memory::Address_at(target_pointer_address_at(pc));
+}
+
+
+Address Assembler::target_address_from_return_address(Address pc) {
+ // Returns the address of the call target from the return address that will
+ // be returned to after a call.
+#ifdef USE_BLX
+ // Call sequence on V7 or later is :
+ // movw ip, #... @ call address low 16
+ // movt ip, #... @ call address high 16
+ // blx ip
+ // @ return address
+ // Or pre-V7 or cases that need frequent patching:
+ // ldr ip, [pc, #...] @ call address
+ // blx ip
+ // @ return address
+ Address candidate = pc - 2 * Assembler::kInstrSize;
+ Instr candidate_instr(Memory::int32_at(candidate));
+ if (IsLdrPcImmediateOffset(candidate_instr)) {
+ return candidate;
+ }
+ candidate = pc - 3 * Assembler::kInstrSize;
+ ASSERT(IsMovW(Memory::int32_at(candidate)) &&
+ IsMovT(Memory::int32_at(candidate + kInstrSize)));
+ return candidate;
+#else
+ // Call sequence is:
+ // mov lr, pc
+ // ldr pc, [pc, #...] @ call address
+ // @ return address
+ return pc - kInstrSize;
+#endif
+}
+
+
+Address Assembler::return_address_from_call_start(Address pc) {
+#ifdef USE_BLX
+ if (IsLdrPcImmediateOffset(Memory::int32_at(pc))) {
+ return pc + kInstrSize * 2;
+ } else {
+ ASSERT(IsMovW(Memory::int32_at(pc)));
+ ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
+ return pc + kInstrSize * 3;
+ }
+#else
+ return pc + kInstrSize;
+#endif
}
@@ -373,17 +435,55 @@
}
-void Assembler::set_target_address_at(Address pc, Address target) {
- Memory::Address_at(target_address_address_at(pc)) = target;
- // Intuitively, we would think it is necessary to flush the instruction cache
- // after patching a target address in the code as follows:
- // CPU::FlushICache(pc, sizeof(target));
- // However, on ARM, no instruction was actually patched by the assignment
- // above; the target address is not part of an instruction, it is patched in
- // the constant pool and is read via a data access; the instruction accessing
- // this address in the constant pool remains unchanged.
+static Instr EncodeMovwImmediate(uint32_t immediate) {
+ ASSERT(immediate < 0x10000);
+ return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
}
+
+void Assembler::set_target_pointer_at(Address pc, Address target) {
+ if (IsMovW(Memory::int32_at(pc))) {
+ ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
+ uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
+ uint32_t immediate = reinterpret_cast<uint32_t>(target);
+ uint32_t intermediate = instr_ptr[0];
+ intermediate &= ~EncodeMovwImmediate(0xFFFF);
+ intermediate |= EncodeMovwImmediate(immediate & 0xFFFF);
+ instr_ptr[0] = intermediate;
+ intermediate = instr_ptr[1];
+ intermediate &= ~EncodeMovwImmediate(0xFFFF);
+ intermediate |= EncodeMovwImmediate(immediate >> 16);
+ instr_ptr[1] = intermediate;
+ ASSERT(IsMovW(Memory::int32_at(pc)));
+ ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
+ CPU::FlushICache(pc, 2 * kInstrSize);
+ } else {
+ ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
+ Memory::Address_at(target_pointer_address_at(pc)) = target;
+ // Intuitively, we would think it is necessary to always flush the
+ // instruction cache after patching a target address in the code as follows:
+ // CPU::FlushICache(pc, sizeof(target));
+ // However, on ARM, no instruction is actually patched in the case
+ // of embedded constants of the form:
+ // ldr ip, [pc, #...]
+ // since the instruction accessing this address in the constant pool remains
+ // unchanged.
+ }
+}
+
+
+Address Assembler::target_address_at(Address pc) {
+ return reinterpret_cast<Address>(
+ reinterpret_cast<intptr_t>(target_pointer_at(pc)) & ~3);
+}
+
+
+void Assembler::set_target_address_at(Address pc, Address target) {
+ set_target_pointer_at(pc, reinterpret_cast<Address>(
+ reinterpret_cast<intptr_t>(target) & ~3));
+}
+
+
} } // namespace v8::internal
#endif // V8_ARM_ASSEMBLER_ARM_INL_H_
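
For reference, a standalone sketch (plain C++, not V8 code) of how the
movw/movt pair introduced above carries a full 32-bit pointer, using the same
immediate bit layout as EncodeMovwImmediate; the target value is hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    // Mirrors the assembler helper: imm4 goes to bits 19:16, imm12 to 11:0.
    static uint32_t EncodeMovwImmediate(uint32_t immediate) {
      return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
    }

    int main() {
      uint32_t target = 0x12345678;  // hypothetical code address
      uint32_t movw_imm = EncodeMovwImmediate(target & 0xffff);  // low half
      uint32_t movt_imm = EncodeMovwImmediate(target >> 16);     // high half
      printf("movw imm field: 0x%05x\n", movw_imm);
      printf("movt imm field: 0x%05x\n", movt_imm);
      return 0;
    }
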
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 129e62b..9be62a4 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -77,6 +77,9 @@
#endif // defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__)
// && !defined(__SOFTFP__)
#endif // _arm__
+ if (answer & (1u << ARMv7)) {
+ answer |= 1u << UNALIGNED_ACCESSES;
+ }
return answer;
}
@@ -114,6 +117,10 @@
if (FLAG_enable_sudiv) {
supported_ |= 1u << SUDIV;
}
+
+ if (FLAG_enable_movw_movt) {
+ supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
+ }
#else // __arm__
// Probe for additional features not already known to be available.
if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) {
@@ -133,6 +140,15 @@
found_by_runtime_probing_ |= 1u << SUDIV;
}
+ if (!IsSupported(UNALIGNED_ACCESSES) && OS::ArmCpuHasFeature(ARMv7)) {
+ found_by_runtime_probing_ |= 1u << UNALIGNED_ACCESSES;
+ }
+
+ if (OS::GetCpuImplementer() == QUALCOMM_IMPLEMENTER &&
+ OS::ArmCpuHasFeature(ARMv7)) {
+ found_by_runtime_probing_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
+ }
+
supported_ |= found_by_runtime_probing_;
#endif
@@ -723,12 +739,6 @@
}
-static Instr EncodeMovwImmediate(uint32_t immediate) {
- ASSERT(immediate < 0x10000);
- return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
-}
-
-
// Low-level code emission routines depending on the addressing mode.
// If this returns true then you have to use the rotate_imm and immed_8
// that it returns, because it may have already changed the instruction
@@ -793,7 +803,7 @@
// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
-bool Operand::must_use_constant_pool(const Assembler* assembler) const {
+bool Operand::must_output_reloc_info(const Assembler* assembler) const {
if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
if (!Serializer::enabled()) {
@@ -809,25 +819,28 @@
}
+static bool use_movw_movt(const Operand& x, const Assembler* assembler) {
+ if (Assembler::use_immediate_embedded_pointer_loads(assembler)) {
+ return true;
+ }
+ if (x.must_output_reloc_info(assembler)) {
+ return false;
+ }
+ return CpuFeatures::IsSupported(ARMv7);
+}
+
+
bool Operand::is_single_instruction(const Assembler* assembler,
Instr instr) const {
if (rm_.is_valid()) return true;
uint32_t dummy1, dummy2;
- if (must_use_constant_pool(assembler) ||
+ if (must_output_reloc_info(assembler) ||
!fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, or use of
// constant pool is required. For a mov instruction not setting the
// condition code additional instruction conventions can be used.
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- if (must_use_constant_pool(assembler) ||
- !CpuFeatures::IsSupported(ARMv7)) {
- // mov instruction will be an ldr from constant pool (one instruction).
- return true;
- } else {
- // mov instruction will be a mov or movw followed by movt (two
- // instructions).
- return false;
- }
+ return !use_movw_movt(*this, assembler);
} else {
// If this is not a mov or mvn instruction there will always be an
// additional instruction - either mov or ldr. The mov might actually be two
@@ -843,6 +856,29 @@
}
+void Assembler::move_32_bit_immediate(Condition cond,
+ Register rd,
+ SBit s,
+ const Operand& x) {
+ if (rd.code() != pc.code() && s == LeaveCC) {
+ if (use_movw_movt(x, this)) {
+ if (x.must_output_reloc_info(this)) {
+ RecordRelocInfo(x.rmode_, x.imm32_, DONT_USE_CONSTANT_POOL);
+ // Make sure the movw/movt doesn't get separated.
+ BlockConstPoolFor(2);
+ }
+ emit(cond | 0x30*B20 | rd.code()*B12 |
+ EncodeMovwImmediate(x.imm32_ & 0xffff));
+ movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
+ return;
+ }
+ }
+
+ RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
+ ldr(rd, MemOperand(pc, 0), cond);
+}
+
+
void Assembler::addrmod1(Instr instr,
Register rn,
Register rd,
@@ -853,7 +889,7 @@
// Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
- if (x.must_use_constant_pool(this) ||
+ if (x.must_output_reloc_info(this) ||
!fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, so load
// it first to register ip and change the original instruction to use ip.
@@ -862,24 +898,19 @@
CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
Condition cond = Instruction::ConditionField(instr);
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- if (x.must_use_constant_pool(this) ||
- !CpuFeatures::IsSupported(ARMv7)) {
- RecordRelocInfo(x.rmode_, x.imm32_);
- ldr(rd, MemOperand(pc, 0), cond);
- } else {
- // Will probably use movw, will certainly not use constant pool.
- mov(rd, Operand(x.imm32_ & 0xffff), LeaveCC, cond);
- movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
- }
+ move_32_bit_immediate(cond, rd, LeaveCC, x);
} else {
- // If this is not a mov or mvn instruction we may still be able to avoid
- // a constant pool entry by using mvn or movw.
- if (!x.must_use_constant_pool(this) &&
- (instr & kMovMvnMask) != kMovMvnPattern) {
- mov(ip, x, LeaveCC, cond);
- } else {
- RecordRelocInfo(x.rmode_, x.imm32_);
+ if ((instr & kMovMvnMask) == kMovMvnPattern) {
+ // Moves need to use a constant pool entry.
+ RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
ldr(ip, MemOperand(pc, 0), cond);
+ } else if (x.must_output_reloc_info(this)) {
+ // Otherwise, use most efficient form of fetching from constant pool.
+ move_32_bit_immediate(cond, ip, LeaveCC, x);
+ } else {
+ // If this is not a mov or mvn instruction we may still be able to
+ // avoid a constant pool entry by using mvn or movw.
+ mov(ip, x, LeaveCC, cond);
}
addrmod1(instr, rn, rd, Operand(ip));
}
@@ -1186,6 +1217,9 @@
void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
ASSERT(immediate < 0x10000);
+ // May use movw if supported, but on unsupported platforms will try to use
+ // equivalent rotated immed_8 value and other tricks before falling back to a
+ // constant pool load.
mov(reg, Operand(immediate), LeaveCC, cond);
}
@@ -1415,7 +1449,7 @@
// Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
- if (src.must_use_constant_pool(this) ||
+ if (src.must_output_reloc_info(this) ||
!fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
// Immediate operand cannot be encoded, load it first to register ip.
RecordRelocInfo(src.rmode_, src.imm32_);
@@ -2433,15 +2467,35 @@
// Pseudo instructions.
void Assembler::nop(int type) {
- // This is mov rx, rx.
- ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
+ // ARMv6{K/T2} and v7 have an actual NOP instruction but it serializes
+ // some of the CPU's pipeline and has to issue. Older ARM chips simply used
+ // MOV Rx, Rx as NOP and it performs better even in newer CPUs.
+ // We therefore use MOV Rx, Rx, even on newer CPUs, and use Rx to encode
+ // a type.
+ ASSERT(0 <= type && type <= 14); // mov pc, pc isn't a nop.
emit(al | 13*B21 | type*B12 | type);
}
+bool Assembler::IsMovT(Instr instr) {
+ instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions
+ ((kNumRegisters-1)*B12) | // mask out register
+ EncodeMovwImmediate(0xFFFF)); // mask out immediate value
+ return instr == 0x34*B20;
+}
+
+
+bool Assembler::IsMovW(Instr instr) {
+ instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions
+ ((kNumRegisters-1)*B12) | // mask out destination
+ EncodeMovwImmediate(0xFFFF)); // mask out immediate value
+ return instr == 0x30*B20;
+}
+
+
bool Assembler::IsNop(Instr instr, int type) {
+ ASSERT(0 <= type && type <= 14); // mov pc, pc isn't a nop.
// Check for mov rx, rx where x = type.
- ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
return instr == (al | 13*B21 | type*B12 | type);
}
@@ -2557,18 +2611,21 @@
}
-void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
+ UseConstantPoolMode mode) {
// We do not try to reuse pool constants.
RelocInfo rinfo(pc_, rmode, data, NULL);
if (((rmode >= RelocInfo::JS_RETURN) &&
(rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
- (rmode == RelocInfo::CONST_POOL)) {
+ (rmode == RelocInfo::CONST_POOL) ||
+ mode == DONT_USE_CONSTANT_POOL) {
// Adjust code for new modes.
ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
|| RelocInfo::IsJSReturn(rmode)
|| RelocInfo::IsComment(rmode)
|| RelocInfo::IsPosition(rmode)
- || RelocInfo::IsConstPool(rmode));
+ || RelocInfo::IsConstPool(rmode)
+ || mode == DONT_USE_CONSTANT_POOL);
// These modes do not need an entry in the constant pool.
} else {
ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
@@ -2687,17 +2744,19 @@
Instr instr = instr_at(rinfo.pc());
// Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
- ASSERT(IsLdrPcImmediateOffset(instr) &&
- GetLdrRegisterImmediateOffset(instr) == 0);
+ if (IsLdrPcImmediateOffset(instr) &&
+ GetLdrRegisterImmediateOffset(instr) == 0) {
+ int delta = pc_ - rinfo.pc() - kPcLoadDelta;
+ // 0 is the smallest delta:
+ // ldr rd, [pc, #0]
+ // constant pool marker
+ // data
+ ASSERT(is_uint12(delta));
- int delta = pc_ - rinfo.pc() - kPcLoadDelta;
- // 0 is the smallest delta:
- // ldr rd, [pc, #0]
- // constant pool marker
- // data
- ASSERT(is_uint12(delta));
-
- instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
+ instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
+ } else {
+ ASSERT(IsMovW(instr));
+ }
emit(rinfo.data());
}
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 005c78d..dfcce60 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -425,7 +425,7 @@
// actual instruction to use is required for this calculation. For other
// instructions instr is ignored.
bool is_single_instruction(const Assembler* assembler, Instr instr = 0) const;
- bool must_use_constant_pool(const Assembler* assembler) const;
+ bool must_output_reloc_info(const Assembler* assembler) const;
inline int32_t immediate() const {
ASSERT(!rm_.is_valid());
@@ -512,6 +512,9 @@
if (f == VFP3 && !FLAG_enable_vfp3) return false;
if (f == VFP2 && !FLAG_enable_vfp2) return false;
if (f == SUDIV && !FLAG_enable_sudiv) return false;
+ if (f == UNALIGNED_ACCESSES && !FLAG_enable_unaligned_accesses) {
+ return false;
+ }
return (supported_ & (1u << f)) != 0;
}
@@ -686,13 +689,25 @@
void label_at_put(Label* L, int at_offset);
// Return the address in the constant pool of the code target address used by
- // the branch/call instruction at pc.
- INLINE(static Address target_address_address_at(Address pc));
+ // the branch/call instruction at pc, or the object in a mov.
+ INLINE(static Address target_pointer_address_at(Address pc));
+
+ // Read/Modify the pointer in the branch/call/move instruction at pc.
+ INLINE(static Address target_pointer_at(Address pc));
+ INLINE(static void set_target_pointer_at(Address pc, Address target));
// Read/Modify the code target address in the branch/call instruction at pc.
INLINE(static Address target_address_at(Address pc));
INLINE(static void set_target_address_at(Address pc, Address target));
+ // Return the code target address at a call site from the return address
+ // of that call in the instruction stream.
+ INLINE(static Address target_address_from_return_address(Address pc));
+
+ // Given the address of the beginning of a call, return the address
+ // in the instruction stream that the call will return from.
+ INLINE(static Address return_address_from_call_start(Address pc));
+
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
@@ -711,22 +726,6 @@
// Size of an instruction.
static const int kInstrSize = sizeof(Instr);
- // Distance between the instruction referring to the address of the call
- // target and the return address.
-#ifdef USE_BLX
- // Call sequence is:
- // ldr ip, [pc, #...] @ call address
- // blx ip
- // @ return address
- static const int kCallTargetAddressOffset = 2 * kInstrSize;
-#else
- // Call sequence is:
- // mov lr, pc
- // ldr pc, [pc, #...] @ call address
- // @ return address
- static const int kCallTargetAddressOffset = kInstrSize;
-#endif
-
// Distance between start of patched return sequence and the emitted address
// to jump to.
#ifdef USE_BLX
@@ -755,6 +754,12 @@
static const int kPatchDebugBreakSlotAddressOffset = kInstrSize;
#endif
+#ifdef USE_BLX
+ static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstrSize;
+#else
+ static const int kPatchDebugBreakSlotReturnOffset = kInstrSize;
+#endif
+
// Difference between address of current opcode and value read from pc
// register.
static const int kPcLoadDelta = 8;
@@ -1182,6 +1187,20 @@
bool predictable_code_size() const { return predictable_code_size_; }
+ static bool use_immediate_embedded_pointer_loads(
+ const Assembler* assembler) {
+#ifdef USE_BLX
+ return CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
+ (assembler == NULL || !assembler->predictable_code_size());
+#else
+ // If not using BLX, loads from the constant pool cannot be immediate,
+ // because the ldr pc, [pc + #xxxx] used for calls must be a single
+ // instruction and cannot be easily distinguished out of context from
+ // other loads that could use movw/movt.
+ return false;
+#endif
+ }
+
// Check the code size generated from label to here.
int SizeOfCodeGeneratedSince(Label* label) {
return pc_offset() - label->pos();
@@ -1302,6 +1321,8 @@
static Register GetCmpImmediateRegister(Instr instr);
static int GetCmpImmediateRawImmediate(Instr instr);
static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
+ static bool IsMovT(Instr instr);
+ static bool IsMovW(Instr instr);
// Constants in pools are accessed via pc relative addressing, which can
// reach +/-4KB thereby defining a maximum distance between the instruction
@@ -1440,6 +1461,12 @@
void GrowBuffer();
inline void emit(Instr x);
+ // 32-bit immediate values
+ void move_32_bit_immediate(Condition cond,
+ Register rd,
+ SBit s,
+ const Operand& x);
+
// Instruction generation
void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
void addrmod2(Instr instr, Register rd, const MemOperand& x);
@@ -1453,8 +1480,14 @@
void link_to(Label* L, Label* appendix);
void next(Label* L);
+ enum UseConstantPoolMode {
+ USE_CONSTANT_POOL,
+ DONT_USE_CONSTANT_POOL
+ };
+
// Record reloc info for current pc_
- void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0,
+ UseConstantPoolMode mode = USE_CONSTANT_POOL);
friend class RegExpMacroAssemblerARM;
friend class RelocInfo;
@@ -1479,6 +1512,26 @@
};
+class PredictableCodeSizeScope {
+ public:
+ explicit PredictableCodeSizeScope(Assembler* assembler)
+ : asm_(assembler) {
+ old_value_ = assembler->predictable_code_size();
+ assembler->set_predictable_code_size(true);
+ }
+
+ ~PredictableCodeSizeScope() {
+ if (!old_value_) {
+ asm_->set_predictable_code_size(false);
+ }
+ }
+
+ private:
+ Assembler* asm_;
+ bool old_value_;
+};
+
+
} } // namespace v8::internal
#endif // V8_ARM_ASSEMBLER_ARM_H_
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index a6aff15..ceb108f 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -7569,6 +7569,7 @@
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (entry_hook_ != NULL) {
+ PredictableCodeSizeScope predictable(masm);
ProfileEntryHookStub stub;
__ push(lr);
__ CallStub(&stub);
@@ -7580,7 +7581,7 @@
void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// The entry hook is a "push lr" instruction, followed by a call.
const int32_t kReturnAddressDistanceFromFunctionStart =
- Assembler::kCallTargetAddressOffset + Assembler::kInstrSize;
+ 3 * Assembler::kInstrSize;
// Save live volatile registers.
__ Push(lr, r5, r1);
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index 5aadc3c..4fa49e3 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -75,10 +75,6 @@
#endif
-#if CAN_USE_UNALIGNED_ACCESSES
-#define V8_TARGET_CAN_READ_UNALIGNED 1
-#endif
-
// Using blx may yield better code, so use it when required or when available
#if defined(USE_THUMB_INTERWORK) || defined(CAN_USE_ARMV5_INSTRUCTIONS)
#define USE_BLX 1
@@ -691,6 +687,9 @@
&& (Bit(20) == 0)
&& ((Bit(7) == 0)); }
+ // Test for a nop instruction, which falls under type 1.
+ inline bool IsNopType1() const { return Bits(24, 0) == 0x0120F000; }
+
// Test for a stop instruction.
inline bool IsStop() const {
return (TypeValue() == 7) && (Bit(24) == 1) && (SvcValue() >= kStopCode);
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index 3e7a1e9..c2941be 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -48,7 +48,7 @@
// add sp, sp, #4
// bx lr
// to a call to the debug break return code.
- // #if USE_BLX
+ // #ifdef USE_BLX
// ldr ip, [pc, #0]
// blx ip
// #else
@@ -99,7 +99,7 @@
// mov r2, r2
// mov r2, r2
// to a call to the debug break slot code.
- // #if USE_BLX
+ // #ifdef USE_BLX
// ldr ip, [pc, #0]
// blx ip
// #else
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 5339be1..19667b9 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -104,19 +104,7 @@
// ignore all slots that might have been recorded on it.
isolate->heap()->mark_compact_collector()->InvalidateCode(code);
- // Iterate over all the functions which share the same code object
- // and make them use unoptimized version.
- Context* context = function->context()->native_context();
- Object* element = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
- SharedFunctionInfo* shared = function->shared();
- while (!element->IsUndefined()) {
- JSFunction* func = JSFunction::cast(element);
- // Grab element before code replacement as ReplaceCode alters the list.
- element = func->next_function_link();
- if (func->code() == code) {
- func->ReplaceCode(shared->code());
- }
- }
+ ReplaceCodeForRelatedFunctions(function, code);
if (FLAG_trace_deopt) {
PrintF("[forced deoptimization: ");
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 7c5f63e..3c94a46 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -830,6 +830,8 @@
} else {
Unknown(instr); // not used by V8
}
+ } else if ((type == 1) && instr->IsNopType1()) {
+ Format(instr, "nop'cond");
} else {
switch (instr->OpcodeField()) {
case AND: {
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 1209372..be82283 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -287,6 +287,7 @@
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
+ PredictableCodeSizeScope predictable(masm_);
StackCheckStub stub;
__ CallStub(&stub);
__ bind(&ok);
@@ -364,6 +365,7 @@
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
+ PredictableCodeSizeScope predictable(masm_);
StackCheckStub stub;
__ CallStub(&stub);
}
@@ -437,6 +439,7 @@
// tool from instrumenting as we rely on the code size here.
int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
+ PredictableCodeSizeScope predictable(masm_);
__ RecordJSReturn();
masm_->mov(sp, fp);
masm_->ldm(ia_w, sp, fp.bit() | lr.bit());
@@ -1137,7 +1140,7 @@
__ cmp(r1, Operand(Smi::FromInt(0)));
__ b(eq, &no_descriptors);
- __ LoadInstanceDescriptors(r0, r2, r4);
+ __ LoadInstanceDescriptors(r0, r2);
__ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheOffset));
__ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
@@ -2239,7 +2242,9 @@
RelocInfo::Mode rmode,
TypeFeedbackId ast_id) {
ic_total_count_++;
- __ Call(code, rmode, ast_id);
+ // All calls must have a predictable size in full-codegen code to ensure that
+ // the debugger can patch them correctly.
+ __ Call(code, rmode, ast_id, al, NEVER_INLINE_TARGET_ADDRESS);
}
void FullCodeGenerator::EmitCallWithIC(Call* expr,
@@ -2683,7 +2688,7 @@
__ cmp(r3, Operand(0));
__ b(eq, &done);
- __ LoadInstanceDescriptors(r1, r4, r2);
+ __ LoadInstanceDescriptors(r1, r4);
// r4: descriptor array.
// r3: valid entries in the descriptor array.
STATIC_ASSERT(kSmiTag == 0);
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 87d09c0..4839589 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -1734,7 +1734,7 @@
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
Address cmp_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
+ Assembler::return_address_from_call_start(address);
// If the instruction following the call is not a cmp rx, #yyy, nothing
// was inlined.
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index d513083..21c549f 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -2294,9 +2294,7 @@
LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
LOperand* map = UseRegister(instr->map());
- LOperand* scratch = TempRegister();
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LForInCacheArray(map, scratch)));
+ return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map)));
}
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 5e1c991..fb36fe9 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -2398,15 +2398,13 @@
};
-class LForInCacheArray: public LTemplateInstruction<1, 1, 1> {
+class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LForInCacheArray(LOperand* map, LOperand* scratch) {
+ explicit LForInCacheArray(LOperand* map) {
inputs_[0] = map;
- temps_[0] = scratch;
}
LOperand* map() { return inputs_[0]; }
- LOperand* scratch() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index d5b67a7..6f5aa43 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -608,22 +608,24 @@
void LCodeGen::CallCode(Handle<Code> code,
RelocInfo::Mode mode,
- LInstruction* instr) {
- CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+ LInstruction* instr,
+ TargetAddressStorageMode storage_mode) {
+ CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
}
void LCodeGen::CallCodeGeneric(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
- SafepointMode safepoint_mode) {
+ SafepointMode safepoint_mode,
+ TargetAddressStorageMode storage_mode) {
ASSERT(instr != NULL);
// Block literal pool emission to ensure nop indicating no inlined smi code
// is in the correct position.
Assembler::BlockConstPoolScope block_const_pool(masm());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
- __ Call(code, mode);
+ __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
RecordSafepointWithLazyDeopt(instr, safepoint_mode);
// Signal that we don't inline smi code before these stubs in the
@@ -2471,6 +2473,7 @@
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch with
// the cached map.
+ PredictableCodeSizeScope predictable(masm_);
Handle<JSGlobalPropertyCell> cell =
factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
__ mov(ip, Operand(Handle<Object>(cell)));
@@ -2532,6 +2535,9 @@
ASSERT(temp.is(r4));
__ LoadHeapObject(InstanceofStub::right(), instr->function());
static const int kAdditionalDelta = 5;
+ // Make sure that code size is predictable, since we use specific constant
+ // offsets in the code to find embedded values.
+ PredictableCodeSizeScope predictable(masm_);
int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
Label before_push_delta;
__ bind(&before_push_delta);
@@ -2795,7 +2801,7 @@
if (need_generic) {
__ mov(r2, Operand(name));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
__ bind(&done);
}
@@ -2808,7 +2814,7 @@
// Name is always in r2.
__ mov(r2, Operand(instr->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -3119,7 +3125,7 @@
ASSERT(ToRegister(instr->key()).is(r0));
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -3814,7 +3820,7 @@
int arity = instr->arity();
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -3827,7 +3833,7 @@
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
__ mov(r2, Operand(instr->name()));
- CallCode(ic, mode, instr);
+ CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -3852,7 +3858,7 @@
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
__ mov(r2, Operand(instr->name()));
- CallCode(ic, mode, instr);
+ CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -3952,7 +3958,7 @@
Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -4166,7 +4172,7 @@
Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -5541,6 +5547,7 @@
__ cmp(sp, Operand(ip));
__ b(hs, &done);
StackCheckStub stub;
+ PredictableCodeSizeScope predictable(masm_);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
EnsureSpaceForLazyDeopt();
__ bind(&done);
@@ -5621,7 +5628,6 @@
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Register map = ToRegister(instr->map());
Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->scratch());
Label load_cache, done;
__ EnumLength(result, map);
__ cmp(result, Operand(Smi::FromInt(0)));
@@ -5630,7 +5636,7 @@
__ jmp(&done);
__ bind(&load_cache);
- __ LoadInstanceDescriptors(map, result, scratch);
+ __ LoadInstanceDescriptors(map, result);
__ ldr(result,
FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ ldr(result,
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index ac46f05..9281537 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -213,14 +213,18 @@
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
};
- void CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr);
+ void CallCode(
+ Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS);
- void CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode);
+ void CallCodeGeneric(
+ Handle<Code> code,
+ RelocInfo::Mode mode,
+ LInstruction* instr,
+ SafepointMode safepoint_mode,
+ TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS);
void CallRuntime(const Runtime::Function* function,
int num_arguments,
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 4da5387..623bd6a 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -108,7 +108,7 @@
int MacroAssembler::CallSize(Register target, Condition cond) {
-#if USE_BLX
+#ifdef USE_BLX
return kInstrSize;
#else
return 2 * kInstrSize;
@@ -121,7 +121,7 @@
BlockConstPoolScope block_const_pool(this);
Label start;
bind(&start);
-#if USE_BLX
+#ifdef USE_BLX
blx(target, cond);
#else
// set lr for return at current pc + 8
@@ -158,15 +158,29 @@
void MacroAssembler::Call(Address target,
RelocInfo::Mode rmode,
- Condition cond) {
+ Condition cond,
+ TargetAddressStorageMode mode) {
// Block constant pool for the call instruction sequence.
BlockConstPoolScope block_const_pool(this);
Label start;
bind(&start);
-#if USE_BLX
- // On ARMv5 and after the recommended call sequence is:
- // ldr ip, [pc, #...]
- // blx ip
+
+ bool old_predictable_code_size = predictable_code_size();
+ if (mode == NEVER_INLINE_TARGET_ADDRESS) {
+ set_predictable_code_size(true);
+ }
+
+#ifdef USE_BLX
+ // Call sequence on V7 or later may be :
+ // movw ip, #... @ call address low 16
+ // movt ip, #... @ call address high 16
+ // blx ip
+ // @ return address
+ // Or for pre-V7 or values that may be back-patched
+ // to avoid ICache flushes:
+ // ldr ip, [pc, #...] @ call address
+ // blx ip
+ // @ return address
// Statement positions are expected to be recorded when the target
// address is loaded. The mov method will automatically record
@@ -177,15 +191,16 @@
mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
blx(ip, cond);
- ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
#else
// Set lr for return at current pc + 8.
mov(lr, Operand(pc), LeaveCC, cond);
// Emit a ldr<cond> pc, [pc + offset of target in constant pool].
mov(pc, Operand(reinterpret_cast<int32_t>(target), rmode), LeaveCC, cond);
- ASSERT(kCallTargetAddressOffset == kInstrSize);
#endif
ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
+ if (mode == NEVER_INLINE_TARGET_ADDRESS) {
+ set_predictable_code_size(old_predictable_code_size);
+ }
}
@@ -200,7 +215,8 @@
void MacroAssembler::Call(Handle<Code> code,
RelocInfo::Mode rmode,
TypeFeedbackId ast_id,
- Condition cond) {
+ Condition cond,
+ TargetAddressStorageMode mode) {
Label start;
bind(&start);
ASSERT(RelocInfo::IsCodeTarget(rmode));
@@ -209,9 +225,7 @@
rmode = RelocInfo::CODE_TARGET_WITH_ID;
}
// 'code' is always generated ARM code, never THUMB code
- Call(reinterpret_cast<Address>(code.location()), rmode, cond);
- ASSERT_EQ(CallSize(code, rmode, ast_id, cond),
- SizeOfCodeGeneratedSince(&start));
+ Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
}
@@ -288,17 +302,15 @@
void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
Condition cond) {
if (!src2.is_reg() &&
- !src2.must_use_constant_pool(this) &&
+ !src2.must_output_reloc_info(this) &&
src2.immediate() == 0) {
mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond);
-
} else if (!src2.is_single_instruction(this) &&
- !src2.must_use_constant_pool(this) &&
+ !src2.must_output_reloc_info(this) &&
CpuFeatures::IsSupported(ARMv7) &&
IsPowerOf2(src2.immediate() + 1)) {
ubfx(dst, src1, 0,
WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
-
} else {
and_(dst, src1, src2, LeaveCC, cond);
}
@@ -3231,17 +3243,17 @@
cmp(length, Operand(kPointerSize));
b(lt, &byte_loop);
ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
-#if CAN_USE_UNALIGNED_ACCESSES
- str(scratch, MemOperand(dst, kPointerSize, PostIndex));
-#else
- strb(scratch, MemOperand(dst, 1, PostIndex));
- mov(scratch, Operand(scratch, LSR, 8));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- mov(scratch, Operand(scratch, LSR, 8));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- mov(scratch, Operand(scratch, LSR, 8));
- strb(scratch, MemOperand(dst, 1, PostIndex));
-#endif
+ if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
+ str(scratch, MemOperand(dst, kPointerSize, PostIndex));
+ } else {
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+ mov(scratch, Operand(scratch, LSR, 8));
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+ mov(scratch, Operand(scratch, LSR, 8));
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+ mov(scratch, Operand(scratch, LSR, 8));
+ strb(scratch, MemOperand(dst, 1, PostIndex));
+ }
sub(length, length, Operand(kPointerSize));
b(&word_loop);
@@ -3740,31 +3752,8 @@
void MacroAssembler::LoadInstanceDescriptors(Register map,
- Register descriptors,
- Register scratch) {
- Register temp = descriptors;
- ldr(temp, FieldMemOperand(map, Map::kTransitionsOrBackPointerOffset));
-
- Label ok, fail, load_from_back_pointer;
- CheckMap(temp,
- scratch,
- isolate()->factory()->fixed_array_map(),
- &fail,
- DONT_DO_SMI_CHECK);
- ldr(descriptors, FieldMemOperand(temp, TransitionArray::kDescriptorsOffset));
- jmp(&ok);
-
- bind(&fail);
- CompareRoot(temp, Heap::kUndefinedValueRootIndex);
- b(ne, &load_from_back_pointer);
- mov(descriptors, Operand(FACTORY->empty_descriptor_array()));
- jmp(&ok);
-
- bind(&load_from_back_pointer);
- ldr(temp, FieldMemOperand(temp, Map::kTransitionsOrBackPointerOffset));
- ldr(descriptors, FieldMemOperand(temp, TransitionArray::kDescriptorsOffset));
-
- bind(&ok);
+ Register descriptors) {
+ ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
}
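
The CopyBytes change above replaces a compile-time unaligned-access switch
with a runtime CpuFeatures check; a standalone illustration (plain C++, not V8
code) of the byte-wise fallback it keeps for CPUs without unaligned stores:

    #include <stdint.h>

    // Store a 32-bit word one byte at a time, least significant byte first,
    // mirroring the strb / LSR #8 sequence used when UNALIGNED_ACCESSES is
    // not supported at runtime.
    static void StoreWordBytewise(uint8_t* dst, uint32_t word) {
      for (int i = 0; i < 4; ++i) {
        dst[i] = static_cast<uint8_t>(word & 0xff);
        word >>= 8;
      }
    }

    // Example: StoreWordBytewise(buffer + 1, 0xCAFEBABE) writes to an
    // odd address without requiring hardware unaligned store support.
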
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index c7032ba..e3e39a3 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -102,6 +102,11 @@
#endif
+enum TargetAddressStorageMode {
+ CAN_INLINE_TARGET_ADDRESS,
+ NEVER_INLINE_TARGET_ADDRESS
+};
+
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
@@ -121,7 +126,9 @@
static int CallSizeNotPredictableCodeSize(Address target,
RelocInfo::Mode rmode,
Condition cond = al);
- void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
+ void Call(Address target, RelocInfo::Mode rmode,
+ Condition cond = al,
+ TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
TypeFeedbackId ast_id = TypeFeedbackId::None(),
@@ -129,7 +136,8 @@
void Call(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
TypeFeedbackId ast_id = TypeFeedbackId::None(),
- Condition cond = al);
+ Condition cond = al,
+ TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
void Ret(Condition cond = al);
// Emit code to discard a non-negative number of pointer-sized elements
@@ -1281,9 +1289,7 @@
DoubleRegister temp_double_reg);
- void LoadInstanceDescriptors(Register map,
- Register descriptors,
- Register scratch);
+ void LoadInstanceDescriptors(Register map, Register descriptors);
void EnumLength(Register dst, Register map);
void NumberOfOwnDescriptors(Register dst, Register map);
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index 66cdd84..17b8677 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -1358,6 +1358,11 @@
}
+bool RegExpMacroAssemblerARM::CanReadUnaligned() {
+ return CpuFeatures::IsSupported(UNALIGNED_ACCESSES) && !slow_safe();
+}
+
+
void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
Register offset = current_input_offset();
@@ -1370,9 +1375,9 @@
// and the operating system running on the target allow it.
// If unaligned load/stores are not supported then this function must only
// be used to load a single character at a time.
-#if !V8_TARGET_CAN_READ_UNALIGNED
- ASSERT(characters == 1);
-#endif
+ if (!CanReadUnaligned()) {
+ ASSERT(characters == 1);
+ }
if (mode_ == ASCII) {
if (characters == 4) {
diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h
index f723fa2..c45669a 100644
--- a/src/arm/regexp-macro-assembler-arm.h
+++ b/src/arm/regexp-macro-assembler-arm.h
@@ -109,6 +109,7 @@
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
+ virtual bool CanReadUnaligned();
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 91df404..5b8ba2a 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -1066,111 +1066,83 @@
int Simulator::ReadW(int32_t addr, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- return *ptr;
-#else
- if ((addr & 3) == 0) {
+ if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
return *ptr;
+ } else {
+ PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ UNIMPLEMENTED();
+ return 0;
}
- PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
- return 0;
-#endif
}
void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- *ptr = value;
- return;
-#else
- if ((addr & 3) == 0) {
+ if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
*ptr = value;
- return;
+ } else {
+ PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ UNIMPLEMENTED();
}
- PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
-#endif
}
uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- return *ptr;
-#else
- if ((addr & 1) == 0) {
+ if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
return *ptr;
+ } else {
+ PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08"
+ V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ UNIMPLEMENTED();
+ return 0;
}
- PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
- return 0;
-#endif
}
int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- int16_t* ptr = reinterpret_cast<int16_t*>(addr);
- return *ptr;
-#else
- if ((addr & 1) == 0) {
+ if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
return *ptr;
+ } else {
+ PrintF("Unaligned signed halfword read at 0x%08x\n", addr);
+ UNIMPLEMENTED();
+ return 0;
}
- PrintF("Unaligned signed halfword read at 0x%08x\n", addr);
- UNIMPLEMENTED();
- return 0;
-#endif
}
void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- *ptr = value;
- return;
-#else
- if ((addr & 1) == 0) {
+ if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
*ptr = value;
- return;
+ } else {
+ PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08"
+ V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ UNIMPLEMENTED();
}
- PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
-#endif
}
void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- int16_t* ptr = reinterpret_cast<int16_t*>(addr);
- *ptr = value;
- return;
-#else
- if ((addr & 1) == 0) {
+ if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
*ptr = value;
- return;
+ } else {
+ PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ UNIMPLEMENTED();
}
- PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
-#endif
}
@@ -1199,37 +1171,26 @@
int32_t* Simulator::ReadDW(int32_t addr) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- int32_t* ptr = reinterpret_cast<int32_t*>(addr);
- return ptr;
-#else
- if ((addr & 3) == 0) {
+ if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
return ptr;
+ } else {
+ PrintF("Unaligned read at 0x%08x\n", addr);
+ UNIMPLEMENTED();
+ return 0;
}
- PrintF("Unaligned read at 0x%08x\n", addr);
- UNIMPLEMENTED();
- return 0;
-#endif
}
void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
-#if V8_TARGET_CAN_READ_UNALIGNED
- int32_t* ptr = reinterpret_cast<int32_t*>(addr);
- *ptr++ = value1;
- *ptr = value2;
- return;
-#else
- if ((addr & 3) == 0) {
+ if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
*ptr++ = value1;
*ptr = value2;
- return;
+ } else {
+ PrintF("Unaligned write at 0x%08x\n", addr);
+ UNIMPLEMENTED();
}
- PrintF("Unaligned write at 0x%08x\n", addr);
- UNIMPLEMENTED();
-#endif
}
@@ -2222,6 +2183,8 @@
PrintF("%08x\n", instr->InstructionBits());
UNIMPLEMENTED();
}
+ } else if ((type == 1) && instr->IsNopType1()) {
+ // NOP.
} else {
int rd = instr->RdValue();
int rn = instr->RnValue();
diff --git a/src/array.js b/src/array.js
index 1cedd8d..250c30c 100644
--- a/src/array.js
+++ b/src/array.js
@@ -62,7 +62,7 @@
}
}
}
- keys.sort(function(a, b) { return a - b; });
+ %_CallFunction(keys, function(a, b) { return a - b; }, ArraySort);
return keys;
}
diff --git a/src/assembler.h b/src/assembler.h
index 0bf28ae..a0e55cc 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -368,19 +368,17 @@
Mode rmode_;
intptr_t data_;
Code* host_;
-#ifdef V8_TARGET_ARCH_MIPS
- // Code and Embedded Object pointers in mips are stored split
+ // Code and Embedded Object pointers on some platforms are stored split
// across two consecutive 32-bit instructions. Heap management
// routines expect to access these pointers indirectly. The following
- // location provides a place for these pointers to exist natually
+ // location provides a place for these pointers to exist naturally
// when accessed via the Iterator.
Object* reconstructed_obj_ptr_;
// External-reference pointers are also split across instruction-pairs
- // in mips, but are accessed via indirect pointers. This location
+ // on some platforms, but are accessed via indirect pointers. This location
// provides a place for that pointer to exist naturally. Its address
// is returned by RelocInfo::target_reference_address().
Address reconstructed_adr_ptr_;
-#endif // V8_TARGET_ARCH_MIPS
friend class RelocIterator;
};
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 4b40d92..a368eef 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -397,7 +397,7 @@
}
PropertyAttributes attribs = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE | READ_ONLY);
- Map::SetDescriptors(map, descriptors);
+ map->set_instance_descriptors(*descriptors);
{ // Add length.
CallbacksDescriptor d(*factory()->length_symbol(), *length, attribs);
@@ -538,7 +538,7 @@
}
PropertyAttributes attribs = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE);
- Map::SetDescriptors(map, descriptors);
+ map->set_instance_descriptors(*descriptors);
{ // Add length.
CallbacksDescriptor d(*factory()->length_symbol(), *length, attribs);
@@ -875,7 +875,7 @@
Handle<Foreign> array_length(factory->NewForeign(&Accessors::ArrayLength));
PropertyAttributes attribs = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE);
- Map::SetDescriptors(initial_map, array_descriptors);
+ initial_map->set_instance_descriptors(*array_descriptors);
{ // Add length.
CallbacksDescriptor d(*factory->length_symbol(), *array_length, attribs);
@@ -924,7 +924,7 @@
factory->NewForeign(&Accessors::StringLength));
PropertyAttributes attribs = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE | READ_ONLY);
- Map::SetDescriptors(string_map, string_descriptors);
+ string_map->set_instance_descriptors(*string_descriptors);
{ // Add length.
CallbacksDescriptor d(*factory->length_symbol(), *string_length, attribs);
@@ -960,7 +960,7 @@
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(0, 5);
DescriptorArray::WhitenessWitness witness(*descriptors);
- Map::SetDescriptors(initial_map, descriptors);
+ initial_map->set_instance_descriptors(*descriptors);
{
// ECMA-262, section 15.10.7.1.
@@ -1144,7 +1144,7 @@
// Create the descriptor array for the arguments object.
Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(0, 3);
DescriptorArray::WhitenessWitness witness(*descriptors);
- Map::SetDescriptors(map, descriptors);
+ map->set_instance_descriptors(*descriptors);
{ // length
FieldDescriptor d(*factory->length_symbol(), 0, DONT_ENUM);
@@ -1534,7 +1534,7 @@
factory()->NewForeign(&Accessors::ScriptEvalFromFunctionName));
PropertyAttributes attribs =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- Map::SetDescriptors(script_map, script_descriptors);
+ script_map->set_instance_descriptors(*script_descriptors);
{
CallbacksDescriptor d(
@@ -1675,7 +1675,7 @@
&Accessors::ArrayLength));
PropertyAttributes attribs = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE);
- Map::SetDescriptors(initial_map, array_descriptors);
+ initial_map->set_instance_descriptors(*array_descriptors);
{ // Add length.
CallbacksDescriptor d(
@@ -1770,7 +1770,7 @@
Handle<DescriptorArray> reresult_descriptors =
factory()->NewDescriptorArray(0, 3);
DescriptorArray::WhitenessWitness witness(*reresult_descriptors);
- Map::SetDescriptors(initial_map, reresult_descriptors);
+ initial_map->set_instance_descriptors(*reresult_descriptors);
{
JSFunction* array_function = native_context()->array_function();
diff --git a/src/cpu-profiler-inl.h b/src/cpu-profiler-inl.h
index 1133b20..4982197 100644
--- a/src/cpu-profiler-inl.h
+++ b/src/cpu-profiler-inl.h
@@ -31,6 +31,7 @@
#include "cpu-profiler.h"
#include <new>
+#include "circular-queue-inl.h"
#include "profile-generator-inl.h"
#include "unbound-queue-inl.h"
@@ -55,18 +56,11 @@
}
-TickSample* ProfilerEventsProcessor::StartTickSampleEvent() {
- if (!ticks_buffer_is_empty_ || ticks_buffer_is_initialized_) return NULL;
- ticks_buffer_is_initialized_ = true;
+TickSample* ProfilerEventsProcessor::TickSampleEvent() {
generator_->Tick();
- ticks_buffer_ = TickSampleEventRecord(enqueue_order_);
- return &ticks_buffer_.sample;
-}
-
-
-void ProfilerEventsProcessor::FinishTickSampleEvent() {
- ASSERT(ticks_buffer_is_initialized_ && ticks_buffer_is_empty_);
- ticks_buffer_is_empty_ = false;
+ TickSampleEventRecord* evt =
+ new(ticks_buffer_.Enqueue()) TickSampleEventRecord(enqueue_order_);
+ return &evt->sample;
}
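
TickSampleEvent now constructs the record in place, in the slot handed out by the circular queue, using placement new. A self-contained sketch of that idiom with a simplified ring buffer (the names here are hypothetical, not the real SamplingCircularQueue API):

  #include <new>
  #include <stddef.h>

  struct Record {
    unsigned order;
    explicit Record(unsigned o) : order(o) {}
  };

  // Minimal fixed-size ring of raw slots; Enqueue() returns the next slot.
  class TinyRing {
   public:
    TinyRing() : pos_(0) {}
    void* Enqueue() { return &storage_[(pos_++ % kSlots) * sizeof(Record)]; }
   private:
    static const size_t kSlots = 16;
    size_t pos_;
    alignas(Record) unsigned char storage_[kSlots * sizeof(Record)];
  };

  // Construct the record directly in the queue's memory, no copy needed.
  Record* NewRecord(TinyRing* ring, unsigned order) {
    return new (ring->Enqueue()) Record(order);
  }
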
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index 8f72c17..3cbac77 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -39,19 +39,19 @@
namespace v8 {
namespace internal {
+static const int kEventsBufferSize = 256 * KB;
+static const int kTickSamplesBufferChunkSize = 64 * KB;
+static const int kTickSamplesBufferChunksCount = 16;
static const int kProfilerStackSize = 64 * KB;
-ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator,
- Sampler* sampler,
- int period_in_useconds)
+ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
: Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
generator_(generator),
- sampler_(sampler),
running_(true),
- period_in_useconds_(period_in_useconds),
- ticks_buffer_is_empty_(true),
- ticks_buffer_is_initialized_(false),
+ ticks_buffer_(sizeof(TickSampleEventRecord),
+ kTickSamplesBufferChunkSize,
+ kTickSamplesBufferChunksCount),
enqueue_order_(0) {
}
@@ -215,17 +215,23 @@
generator_->RecordTickSample(record.sample);
}
- if (ticks_buffer_is_empty_) return !ticks_from_vm_buffer_.IsEmpty();
- if (ticks_buffer_.order == dequeue_order) {
+ const TickSampleEventRecord* rec =
+ TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
+ if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty();
+ // Make a local copy of the tick sample record to ensure that it won't
+ // be modified while we are processing it. This can happen because the
+ // sampler writes to the queue without any synchronization, so if the
+ // processor falls far behind, a record may be modified right under its
+ // feet.
+ TickSampleEventRecord record = *rec;
+ if (record.order == dequeue_order) {
// A paranoid check to make sure that we don't get a memory overrun
// in case of frames_count having a wild value.
- if (ticks_buffer_.sample.frames_count < 0
- || ticks_buffer_.sample.frames_count > TickSample::kMaxFramesCount) {
- ticks_buffer_.sample.frames_count = 0;
- }
- generator_->RecordTickSample(ticks_buffer_.sample);
- ticks_buffer_is_empty_ = true;
- ticks_buffer_is_initialized_ = false;
+ if (record.sample.frames_count < 0
+ || record.sample.frames_count > TickSample::kMaxFramesCount)
+ record.sample.frames_count = 0;
+ generator_->RecordTickSample(record.sample);
+ ticks_buffer_.FinishDequeue();
} else {
return true;
}
@@ -233,29 +239,22 @@
}
-void ProfilerEventsProcessor::ProcessEventsQueue(int64_t stop_time,
- unsigned* dequeue_order) {
- while (OS::Ticks() < stop_time) {
- if (ProcessTicks(*dequeue_order)) {
- // All ticks of the current dequeue_order are processed,
- // proceed to the next code event.
- ProcessCodeEvent(dequeue_order);
- }
- }
-}
-
-
void ProfilerEventsProcessor::Run() {
unsigned dequeue_order = 0;
while (running_) {
- int64_t stop_time = OS::Ticks() + period_in_useconds_;
- if (sampler_ != NULL) {
- sampler_->DoSample();
+ // Process tick events while there are any.
+ if (ProcessTicks(dequeue_order)) {
+ // All ticks of the current dequeue_order are processed,
+ // proceed to the next code event.
+ ProcessCodeEvent(&dequeue_order);
}
- ProcessEventsQueue(stop_time, &dequeue_order);
+ YieldCPU();
}
+ // Process remaining tick events.
+ ticks_buffer_.FlushResidualRecords();
+ // Process tick events until there are none left; any remaining code events are skipped.
while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { }
}
@@ -311,22 +310,15 @@
}
-TickSample* CpuProfiler::StartTickSampleEvent(Isolate* isolate) {
+TickSample* CpuProfiler::TickSampleEvent(Isolate* isolate) {
if (CpuProfiler::is_profiling(isolate)) {
- return isolate->cpu_profiler()->processor_->StartTickSampleEvent();
+ return isolate->cpu_profiler()->processor_->TickSampleEvent();
} else {
return NULL;
}
}
-void CpuProfiler::FinishTickSampleEvent(Isolate* isolate) {
- if (CpuProfiler::is_profiling(isolate)) {
- isolate->cpu_profiler()->processor_->FinishTickSampleEvent();
- }
-}
-
-
void CpuProfiler::DeleteAllProfiles() {
Isolate* isolate = Isolate::Current();
ASSERT(isolate->cpu_profiler() != NULL);
@@ -494,15 +486,13 @@
if (processor_ == NULL) {
Isolate* isolate = Isolate::Current();
- Sampler* sampler = isolate->logger()->sampler();
// Disable logging when using the new implementation.
saved_logging_nesting_ = isolate->logger()->logging_nesting_;
isolate->logger()->logging_nesting_ = 0;
generator_ = new ProfileGenerator(profiles_);
- processor_ = new ProfilerEventsProcessor(generator_,
- sampler,
- FLAG_cpu_profiler_sampling_period);
+ processor_ = new ProfilerEventsProcessor(generator_);
NoBarrier_Store(&is_profiling_, true);
+ processor_->Start();
// Enumerate stuff we already have in the heap.
if (isolate->heap()->HasBeenSetUp()) {
if (!FLAG_prof_browser_mode) {
@@ -515,12 +505,12 @@
isolate->logger()->LogAccessorCallbacks();
}
// Enable stack sampling.
+ Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_);
if (!sampler->IsActive()) {
sampler->Start();
need_to_stop_sampler_ = true;
}
sampler->IncreaseProfilingDepth();
- processor_->Start();
}
}
@@ -555,16 +545,16 @@
void CpuProfiler::StopProcessor() {
- NoBarrier_Store(&is_profiling_, false);
- processor_->Stop();
- processor_->Join();
Logger* logger = Isolate::Current()->logger();
- Sampler* sampler = logger->sampler();
+ Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
sampler->DecreaseProfilingDepth();
if (need_to_stop_sampler_) {
sampler->Stop();
need_to_stop_sampler_ = false;
}
+ NoBarrier_Store(&is_profiling_, false);
+ processor_->Stop();
+ processor_->Join();
delete processor_;
delete generator_;
processor_ = NULL;
diff --git a/src/cpu-profiler.h b/src/cpu-profiler.h
index f4bc0c7..9cd4484 100644
--- a/src/cpu-profiler.h
+++ b/src/cpu-profiler.h
@@ -124,9 +124,7 @@
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public Thread {
public:
- explicit ProfilerEventsProcessor(ProfileGenerator* generator,
- Sampler* sampler,
- int period_in_useconds);
+ explicit ProfilerEventsProcessor(ProfileGenerator* generator);
virtual ~ProfilerEventsProcessor() {}
// Thread control.
@@ -158,12 +156,11 @@
// Puts current stack into tick sample events buffer.
void AddCurrentStack();
- // StartTickSampleEvent returns a pointer only if the ticks_buffer_ is empty,
- // FinishTickSampleEvent marks the ticks_buffer_ as filled.
- // Finish should be called only after successful Start (returning non-NULL
- // pointer).
- INLINE(TickSample* StartTickSampleEvent());
- INLINE(void FinishTickSampleEvent());
+ // Tick sample events are filled directly into the buffer of the circular
+ // queue (the record is of fixed width, though usually not all stack frame
+ // entries are filled). This method returns a pointer to the next record
+ // of the buffer.
+ INLINE(TickSample* TickSampleEvent());
private:
union CodeEventsContainer {
@@ -176,19 +173,13 @@
// Called from events processing thread (Run() method.)
bool ProcessCodeEvent(unsigned* dequeue_order);
bool ProcessTicks(unsigned dequeue_order);
- void ProcessEventsQueue(int64_t stop_time, unsigned* dequeue_order);
INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag));
ProfileGenerator* generator_;
- Sampler* sampler_;
bool running_;
- // Sampling period in microseconds.
- const int period_in_useconds_;
UnboundQueue<CodeEventsContainer> events_buffer_;
- TickSampleEventRecord ticks_buffer_;
- bool ticks_buffer_is_empty_;
- bool ticks_buffer_is_initialized_;
+ SamplingCircularQueue ticks_buffer_;
UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
unsigned enqueue_order_;
};
@@ -227,10 +218,7 @@
static bool HasDetachedProfiles();
// Invoked from stack sampler (thread or signal handler.)
- // Finish should be called only after successful Start (returning non-NULL
- // pointer).
- static TickSample* StartTickSampleEvent(Isolate* isolate);
- static void FinishTickSampleEvent(Isolate* isolate);
+ static TickSample* TickSampleEvent(Isolate* isolate);
// Must be called via PROFILE macro, otherwise will crash when
// profiling is not enabled.
diff --git a/src/debug.cc b/src/debug.cc
index cb2501e..48c5519 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -2285,7 +2285,7 @@
// Find the call address in the running code. This address holds the call to
// either a DebugBreakXXX or to the debug break return entry code if the
// break point is still active after processing the break point.
- Address addr = frame->pc() - Assembler::kCallTargetAddressOffset;
+ Address addr = frame->pc() - Assembler::kPatchDebugBreakSlotReturnOffset;
// Check if the location is at JS exit or debug break slot.
bool at_js_return = false;
@@ -2374,7 +2374,7 @@
#endif
// Find the call address in the running code.
- Address addr = frame->pc() - Assembler::kCallTargetAddressOffset;
+ Address addr = frame->pc() - Assembler::kPatchDebugBreakSlotReturnOffset;
// Check if the location is at JS return.
RelocIterator it(debug_info->code());
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index d7d392a..ad893b3 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -1443,6 +1443,54 @@
}
+static Object* CutOutRelatedFunctionsList(Context* context,
+ Code* code,
+ Object* undefined) {
+ Object* result_list_head = undefined;
+ Object* head;
+ Object* current;
+ current = head = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
+ JSFunction* prev = NULL;
+ while (current != undefined) {
+ JSFunction* func = JSFunction::cast(current);
+ current = func->next_function_link();
+ if (func->code() == code) {
+ func->set_next_function_link(result_list_head);
+ result_list_head = func;
+ if (prev) {
+ prev->set_next_function_link(current);
+ } else {
+ head = current;
+ }
+ } else {
+ prev = func;
+ }
+ }
+ if (head != context->get(Context::OPTIMIZED_FUNCTIONS_LIST)) {
+ context->set(Context::OPTIMIZED_FUNCTIONS_LIST, head);
+ }
+ return result_list_head;
+}
+
+
+void Deoptimizer::ReplaceCodeForRelatedFunctions(JSFunction* function,
+ Code* code) {
+ Context* context = function->context()->native_context();
+
+ SharedFunctionInfo* shared = function->shared();
+
+ Object* undefined = Isolate::Current()->heap()->undefined_value();
+ Object* current = CutOutRelatedFunctionsList(context, code, undefined);
+
+ while (current != undefined) {
+ JSFunction* func = JSFunction::cast(current);
+ current = func->next_function_link();
+ func->set_code(shared->code());
+ func->set_next_function_link(undefined);
+ }
+}
+
+
FrameDescription::FrameDescription(uint32_t frame_size,
JSFunction* function)
: frame_size_(frame_size),
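
CutOutRelatedFunctionsList above splices every function whose code matches out of the context's intrusive optimized-function list and pushes it onto a result list, in a single pass. The same pointer manipulation on a plain struct, as a sketch of the list surgery (hypothetical Node type, not V8 code):

  #include <stddef.h>

  struct Node {
    int code;
    Node* next;
  };

  // Moves every node whose code equals 'target' from *head onto a new list
  // (returned), leaving the remaining nodes linked in their original order.
  Node* CutOut(Node** head, int target) {
    Node* result = NULL;
    Node* prev = NULL;
    Node* current = *head;
    while (current != NULL) {
      Node* next = current->next;
      if (current->code == target) {
        current->next = result;   // push onto the result list
        result = current;
        if (prev != NULL) {
          prev->next = next;      // unlink from the source list
        } else {
          *head = next;           // matched node was the head
        }
      } else {
        prev = current;
      }
      current = next;
    }
    return result;
  }
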
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 1995754..f67f986 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -166,6 +166,10 @@
// execution returns.
static void DeoptimizeFunction(JSFunction* function);
+ // Iterate over all the functions which share the same code object
+ // and make them use the unoptimized version.
+ static void ReplaceCodeForRelatedFunctions(JSFunction* function, Code* code);
+
// Deoptimize all functions in the heap.
static void DeoptimizeAll();
diff --git a/src/elements.cc b/src/elements.cc
index 39686a2..6afbcc0 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -1336,30 +1336,29 @@
int entry = dictionary->FindEntry(key);
if (entry != SeededNumberDictionary::kNotFound) {
Object* result = dictionary->DeleteProperty(entry, mode);
- if (result == heap->true_value()) {
- MaybeObject* maybe_elements = dictionary->Shrink(key);
- FixedArray* new_elements = NULL;
- if (!maybe_elements->To(&new_elements)) {
- return maybe_elements;
+ if (result == heap->false_value()) {
+ if (mode == JSObject::STRICT_DELETION) {
+ // Deleting a non-configurable property in strict mode.
+ HandleScope scope(isolate);
+ Handle<Object> holder(obj);
+ Handle<Object> name = isolate->factory()->NewNumberFromUint(key);
+ Handle<Object> args[2] = { name, holder };
+ Handle<Object> error =
+ isolate->factory()->NewTypeError("strict_delete_property",
+ HandleVector(args, 2));
+ return isolate->Throw(*error);
}
- if (is_arguments) {
- FixedArray::cast(obj->elements())->set(1, new_elements);
- } else {
- obj->set_elements(new_elements);
- }
+ return heap->false_value();
}
- if (mode == JSObject::STRICT_DELETION &&
- result == heap->false_value()) {
- // In strict mode, attempting to delete a non-configurable property
- // throws an exception.
- HandleScope scope(isolate);
- Handle<Object> holder(obj);
- Handle<Object> name = isolate->factory()->NewNumberFromUint(key);
- Handle<Object> args[2] = { name, holder };
- Handle<Object> error =
- isolate->factory()->NewTypeError("strict_delete_property",
- HandleVector(args, 2));
- return isolate->Throw(*error);
+ MaybeObject* maybe_elements = dictionary->Shrink(key);
+ FixedArray* new_elements = NULL;
+ if (!maybe_elements->To(&new_elements)) {
+ return maybe_elements;
+ }
+ if (is_arguments) {
+ FixedArray::cast(obj->elements())->set(1, new_elements);
+ } else {
+ obj->set_elements(new_elements);
}
}
return heap->true_value();
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 02867ba..4c7c090 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -288,6 +288,11 @@
"enable use of ARMv7 instructions if available (ARM only)")
DEFINE_bool(enable_sudiv, true,
"enable use of SDIV and UDIV instructions if available (ARM only)")
+DEFINE_bool(enable_movw_movt, false,
+ "enable loading 32-bit constant by means of movw/movt "
+ "instruction pairs (ARM only)")
+DEFINE_bool(enable_unaligned_accesses, true,
+ "enable unaligned accesses for ARMv7 (ARM only)")
DEFINE_bool(enable_fpu, true,
"enable use of MIPS FPU instructions if available (MIPS only)")
@@ -337,10 +342,6 @@
DEFINE_bool(cache_prototype_transitions, true, "cache prototype transitions")
-// cpu-profiler.cc
-DEFINE_int(cpu_profiler_sampling_period, 1000,
- "CPU profiler sampling period in microseconds")
-
// debug.cc
DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")
DEFINE_bool(debugger_auto_break, true,
diff --git a/src/gdb-jit.cc b/src/gdb-jit.cc
index ecf645d..dde6bbd 100644
--- a/src/gdb-jit.cc
+++ b/src/gdb-jit.cc
@@ -1112,6 +1112,8 @@
w->Write<uint8_t>(DW_OP_reg6); // and here on x64.
#elif defined(V8_TARGET_ARCH_ARM)
UNIMPLEMENTED();
+#elif defined(V8_TARGET_ARCH_MIPS)
+ UNIMPLEMENTED();
#else
#error Unsupported target architecture.
#endif
diff --git a/src/global-handles.cc b/src/global-handles.cc
index 9c0ad45..c09ba4b 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -448,6 +448,11 @@
}
+bool GlobalHandles::IsIndependent(Object** location) {
+ return Node::FromLocation(location)->is_independent();
+}
+
+
bool GlobalHandles::IsNearDeath(Object** location) {
return Node::FromLocation(location)->IsNearDeath();
}
@@ -462,6 +467,9 @@
Node::FromLocation(location)->set_wrapper_class_id(class_id);
}
+uint16_t GlobalHandles::GetWrapperClassId(Object** location) {
+ return Node::FromLocation(location)->wrapper_class_id();
+}
void GlobalHandles::IterateWeakRoots(ObjectVisitor* v) {
for (NodeIterator it(this); !it.done(); it.Advance()) {
diff --git a/src/global-handles.h b/src/global-handles.h
index ddf5fe2..866317e 100644
--- a/src/global-handles.h
+++ b/src/global-handles.h
@@ -131,6 +131,7 @@
WeakReferenceCallback callback);
static void SetWrapperClassId(Object** location, uint16_t class_id);
+ static uint16_t GetWrapperClassId(Object** location);
// Returns the current number of weak handles.
int NumberOfWeakHandles() { return number_of_weak_handles_; }
@@ -154,6 +155,8 @@
// Clear the weakness of a global handle.
void MarkIndependent(Object** location);
+ static bool IsIndependent(Object** location);
+
// Tells whether global handle is near death.
static bool IsNearDeath(Object** location);
diff --git a/src/globals.h b/src/globals.h
index 00ecd63..babffbf 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -136,21 +136,6 @@
#endif
#endif
-// Define unaligned read for the target architectures supporting it.
-#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
-#define V8_TARGET_CAN_READ_UNALIGNED 1
-#elif V8_TARGET_ARCH_ARM
-// Some CPU-OS combinations allow unaligned access on ARM. We assume
-// that unaligned accesses are not allowed unless the build system
-// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
-#if CAN_USE_UNALIGNED_ACCESSES
-#define V8_TARGET_CAN_READ_UNALIGNED 1
-#endif
-#elif V8_TARGET_ARCH_MIPS
-#else
-#error Target architecture is not supported by v8
-#endif
-
// Support for alternative bool type. This is only enabled if the code is
// compiled with USE_MYBOOL defined. This catches some nasty type bugs.
// For instance, 'bool b = "false";' results in b == true! This is a hidden
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 876d8d8..bace902 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -85,13 +85,16 @@
MaybeObject* Heap::AllocateStringFromUtf8(Vector<const char> str,
PretenureFlag pretenure) {
// Check for ASCII first since this is the common case.
- if (String::IsAscii(str.start(), str.length())) {
+ const char* start = str.start();
+ int length = str.length();
+ int non_ascii_start = String::NonAsciiStart(start, length);
+ if (non_ascii_start >= length) {
// If the string is ASCII, we do not need to convert the characters
// since UTF8 is backwards compatible with ASCII.
return AllocateStringFromAscii(str, pretenure);
}
// Non-ASCII and we need to decode.
- return AllocateStringFromUtf8Slow(str, pretenure);
+ return AllocateStringFromUtf8Slow(str, non_ascii_start, pretenure);
}
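
The rewritten fast path above depends on String::NonAsciiStart returning the string length when every byte is ASCII, or the position of the first non-ASCII byte otherwise, so the slow UTF-8 path can resume decoding from that point. A simple byte-wise sketch of that helper (the comment in heap.cc below suggests the real version can also skip ahead a word at a time):

  // Returns 'length' if all bytes are ASCII, otherwise the index of the
  // first byte with the high bit set.
  static int NonAsciiStart(const char* chars, int length) {
    for (int i = 0; i < length; ++i) {
      if (static_cast<unsigned char>(chars[i]) & 0x80) return i;
    }
    return length;
  }
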
@@ -267,13 +270,6 @@
#endif
MaybeObject* result = map_space_->AllocateRaw(Map::kSize);
if (result->IsFailure()) old_gen_exhausted_ = true;
-#ifdef DEBUG
- if (!result->IsFailure()) {
- // Maps have their own alignment.
- CHECK((reinterpret_cast<intptr_t>(result) & kMapAlignmentMask) ==
- static_cast<intptr_t>(kHeapObjectTag));
- }
-#endif
return result;
}
diff --git a/src/heap.cc b/src/heap.cc
index 5de06aa..e3fcb93 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -2102,6 +2102,7 @@
map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
map->init_back_pointer(undefined_value());
map->set_unused_property_fields(0);
+ map->set_instance_descriptors(empty_descriptor_array());
map->set_bit_field(0);
map->set_bit_field2(1 << Map::kIsExtensible);
int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
@@ -2109,12 +2110,6 @@
map->set_bit_field3(bit_field3);
map->set_elements_kind(elements_kind);
- // If the map object is aligned fill the padding area with Smi 0 objects.
- if (Map::kPadStart < Map::kSize) {
- memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
- 0,
- Map::kSize - Map::kPadStart);
- }
return map;
}
@@ -2241,12 +2236,15 @@
// Fix the instance_descriptors for the existing maps.
meta_map()->set_code_cache(empty_fixed_array());
meta_map()->init_back_pointer(undefined_value());
+ meta_map()->set_instance_descriptors(empty_descriptor_array());
fixed_array_map()->set_code_cache(empty_fixed_array());
fixed_array_map()->init_back_pointer(undefined_value());
+ fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
oddball_map()->set_code_cache(empty_fixed_array());
oddball_map()->init_back_pointer(undefined_value());
+ oddball_map()->set_instance_descriptors(empty_descriptor_array());
// Fix prototype object for existing maps.
meta_map()->set_prototype(null_value());
@@ -2715,7 +2713,7 @@
// hash code in place. The hash code for the hidden_symbol is zero to ensure
// that it will always be at the first entry in property descriptors.
{ MaybeObject* maybe_obj =
- AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
+ AllocateSymbol(CStrVector(""), 0, String::kEmptyStringHash);
if (!maybe_obj->ToObject(&obj)) return false;
}
hidden_symbol_ = String::cast(obj);
@@ -3956,8 +3954,7 @@
if (HasDuplicates(descriptors)) {
fun->shared()->ForbidInlineConstructor();
} else {
- MaybeObject* maybe_failure = map->InitializeDescriptors(descriptors);
- if (maybe_failure->IsFailure()) return maybe_failure;
+ map->InitializeDescriptors(descriptors);
map->set_pre_allocated_property_fields(count);
map->set_unused_property_fields(in_object_properties - count);
}
@@ -4431,13 +4428,14 @@
MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
+ int non_ascii_start,
PretenureFlag pretenure) {
- // Count the number of characters in the UTF-8 string and check if
- // it is an ASCII string.
+ // Continue counting the number of characters in the UTF-8 string, starting
+ // from the first non-ascii character or word.
+ int chars = non_ascii_start;
Access<UnicodeCache::Utf8Decoder>
decoder(isolate_->unicode_cache()->utf8_decoder());
- decoder->Reset(string.start(), string.length());
- int chars = 0;
+ decoder->Reset(string.start() + non_ascii_start, string.length() - chars);
while (decoder->has_more()) {
uint32_t r = decoder->GetNext();
if (r <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
diff --git a/src/heap.h b/src/heap.h
index 04bffdd..0ab7ae0 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -705,6 +705,7 @@
PretenureFlag pretenure = NOT_TENURED);
MUST_USE_RESULT MaybeObject* AllocateStringFromUtf8Slow(
Vector<const char> str,
+ int non_ascii_start,
PretenureFlag pretenure = NOT_TENURED);
MUST_USE_RESULT MaybeObject* AllocateStringFromTwoByte(
Vector<const uc16> str,
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 70a6a05..939b4f4 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -1058,6 +1058,13 @@
}
+void HLoadElements::PrintDataTo(StringStream* stream) {
+ value()->PrintNameTo(stream);
+ stream->Add(" ");
+ typecheck()->PrintNameTo(stream);
+}
+
+
void HCheckMaps::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" [%p", *map_set()->first());
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index f8ec100..9e6344c 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -2123,6 +2123,9 @@
}
HValue* value() { return OperandAt(0); }
+ HValue* typecheck() { return OperandAt(1); }
+
+ virtual void PrintDataTo(StringStream* stream);
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index 0b47748..7fdf50c 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -387,6 +387,11 @@
}
+Address Assembler::target_address_from_return_address(Address pc) {
+ return pc - kCallTargetAddressOffset;
+}
+
+
Displacement Assembler::disp_at(Label* L) {
return Displacement(long_at(L->pos()));
}
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 4d9562e..b0f4651 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -601,6 +601,10 @@
inline static Address target_address_at(Address pc);
inline static void set_target_address_at(Address pc, Address target);
+ // Return the code target address at a call site from the return address
+ // of that call in the instruction stream.
+ inline static Address target_address_from_return_address(Address pc);
+
// This sets the branch destination (which is in the instruction on x86).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
@@ -629,6 +633,7 @@
static const int kPatchDebugBreakSlotAddressOffset = 1; // JMP imm32.
static const int kCallInstructionLength = 5;
+ static const int kPatchDebugBreakSlotReturnOffset = kPointerSize;
static const int kJSReturnSequenceLength = 6;
// The debug break slot must be able to contain a call instruction.
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index f50010b..99ad522 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -198,19 +198,7 @@
// ignore all slots that might have been recorded on it.
isolate->heap()->mark_compact_collector()->InvalidateCode(code);
- // Iterate over all the functions which share the same code object
- // and make them use unoptimized version.
- Context* context = function->context()->native_context();
- Object* element = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
- SharedFunctionInfo* shared = function->shared();
- while (!element->IsUndefined()) {
- JSFunction* func = JSFunction::cast(element);
- // Grab element before code replacement as ReplaceCode alters the list.
- element = func->next_function_link();
- if (func->code() == code) {
- func->ReplaceCode(shared->code());
- }
- }
+ ReplaceCodeForRelatedFunctions(function, code);
if (FLAG_trace_deopt) {
PrintF("[forced deoptimization: ");
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 4b08a6d..1d12d23 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -1779,7 +1779,7 @@
Representation input_rep = value->representation();
if (input_rep.IsDouble()) {
LOperand* reg = UseRegister(value);
- return DefineAsRegister(new(zone()) LClampDToUint8(reg));
+ return DefineFixed(new(zone()) LClampDToUint8(reg), eax);
} else if (input_rep.IsInteger32()) {
LOperand* reg = UseFixed(value, eax);
return DefineFixed(new(zone()) LClampIToUint8(reg), eax);
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 1a64c45..0d0bf03 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -129,14 +129,22 @@
XMMRegister scratch_reg,
Register result_reg) {
Label done;
- ExternalReference zero_ref = ExternalReference::address_of_zero();
- movdbl(scratch_reg, Operand::StaticVariable(zero_ref));
- Set(result_reg, Immediate(0));
- ucomisd(input_reg, scratch_reg);
- j(below, &done, Label::kNear);
+ Label conv_failure;
+ pxor(scratch_reg, scratch_reg);
cvtsd2si(result_reg, input_reg);
test(result_reg, Immediate(0xFFFFFF00));
j(zero, &done, Label::kNear);
+ cmp(result_reg, Immediate(0x80000000));
+ j(equal, &conv_failure, Label::kNear);
+ mov(result_reg, Immediate(0));
+ setcc(above, result_reg);
+ sub(result_reg, Immediate(1));
+ and_(result_reg, Immediate(255));
+ jmp(&done, Label::kNear);
+ bind(&conv_failure);
+ Set(result_reg, Immediate(0));
+ ucomisd(input_reg, scratch_reg);
+ j(below, &done, Label::kNear);
Set(result_reg, Immediate(255));
bind(&done);
}
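
The rewritten sequence above clamps a double to the 0..255 range: in-range values are rounded by cvtsd2si, negative values go to 0, values above 255 saturate to 255, and the cvtsd2si failure pattern 0x80000000 (NaN or far out of range) is resolved with a final compare against zero. The intended result is roughly this reference function (a sketch, not the generated code):

  #include <cmath>
  #include <stdint.h>

  // Reference behaviour: NaN and negatives clamp to 0, values >= 255 clamp
  // to 255, everything else rounds to the nearest integer.
  static uint8_t ClampDoubleToUint8(double value) {
    if (std::isnan(value) || value <= 0.0) return 0;
    if (value >= 255.0) return 255;
    return static_cast<uint8_t>(std::lrint(value));
  }
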
@@ -2577,28 +2585,7 @@
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
- Register temp = descriptors;
- mov(temp, FieldOperand(map, Map::kTransitionsOrBackPointerOffset));
-
- Label ok, fail, load_from_back_pointer;
- CheckMap(temp,
- isolate()->factory()->fixed_array_map(),
- &fail,
- DONT_DO_SMI_CHECK);
- mov(descriptors, FieldOperand(temp, TransitionArray::kDescriptorsOffset));
- jmp(&ok);
-
- bind(&fail);
- cmp(temp, isolate()->factory()->undefined_value());
- j(not_equal, &load_from_back_pointer, Label::kNear);
- mov(descriptors, isolate()->factory()->empty_descriptor_array());
- jmp(&ok);
-
- bind(&load_from_back_pointer);
- mov(temp, FieldOperand(temp, Map::kTransitionsOrBackPointerOffset));
- mov(descriptors, FieldOperand(temp, TransitionArray::kDescriptorsOffset));
-
- bind(&ok);
+ mov(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
}
diff --git a/src/ic-inl.h b/src/ic-inl.h
index 0e41093..49b6ef9 100644
--- a/src/ic-inl.h
+++ b/src/ic-inl.h
@@ -40,7 +40,7 @@
Address IC::address() const {
// Get the address of the call.
- Address result = pc() - Assembler::kCallTargetAddressOffset;
+ Address result = Assembler::target_address_from_return_address(pc());
#ifdef ENABLE_DEBUGGER_SUPPORT
Debug* debug = Isolate::Current()->debug();
diff --git a/src/ic.cc b/src/ic.cc
index b902b53..dd0bb10 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -158,7 +158,7 @@
// Get the address of the call site in the active code. This is the
// place where the call to DebugBreakXXX is and where the IC
// normally would be.
- Address addr = pc() - Assembler::kCallTargetAddressOffset;
+ Address addr = Assembler::target_address_from_return_address(pc());
// Return the address in the original code. This is the place where
// the call which has been overwritten by the DebugBreakXXX resides
// and the place where the inline cache system should look.
diff --git a/src/json-parser.h b/src/json-parser.h
index 8eb4f67..40116fa 100644
--- a/src/json-parser.h
+++ b/src/json-parser.h
@@ -149,6 +149,8 @@
}
inline Isolate* isolate() { return isolate_; }
+ inline Factory* factory() { return factory_; }
+ inline Handle<JSFunction> object_constructor() { return object_constructor_; }
inline Zone* zone() const { return zone_; }
static const int kInitialSpecialStringLength = 1024;
@@ -160,6 +162,8 @@
Handle<SeqAsciiString> seq_source_;
Isolate* isolate_;
+ Factory* factory_;
+ Handle<JSFunction> object_constructor_;
uc32 c0_;
int position_;
Zone* zone_;
@@ -169,6 +173,9 @@
Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source,
Zone* zone) {
isolate_ = source->map()->GetHeap()->isolate();
+ factory_ = isolate_->factory();
+ object_constructor_ =
+ Handle<JSFunction>(isolate()->native_context()->object_function());
zone_ = zone;
FlattenString(source);
source_ = source;
@@ -188,7 +195,7 @@
// Parse failed. Current character is the unexpected token.
const char* message;
- Factory* factory = isolate()->factory();
+ Factory* factory = this->factory();
Handle<JSArray> array;
switch (c0_) {
@@ -237,87 +244,101 @@
// Parse any JSON value.
template <bool seq_ascii>
Handle<Object> JsonParser<seq_ascii>::ParseJsonValue() {
- switch (c0_) {
- case '"':
- return ParseJsonString();
- case '-':
- case '0':
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- case '8':
- case '9':
- return ParseJsonNumber();
- case 'f':
- if (AdvanceGetChar() == 'a' && AdvanceGetChar() == 'l' &&
- AdvanceGetChar() == 's' && AdvanceGetChar() == 'e') {
- AdvanceSkipWhitespace();
- return isolate()->factory()->false_value();
- } else {
- return ReportUnexpectedCharacter();
- }
- case 't':
- if (AdvanceGetChar() == 'r' && AdvanceGetChar() == 'u' &&
- AdvanceGetChar() == 'e') {
- AdvanceSkipWhitespace();
- return isolate()->factory()->true_value();
- } else {
- return ReportUnexpectedCharacter();
- }
- case 'n':
- if (AdvanceGetChar() == 'u' && AdvanceGetChar() == 'l' &&
- AdvanceGetChar() == 'l') {
- AdvanceSkipWhitespace();
- return isolate()->factory()->null_value();
- } else {
- return ReportUnexpectedCharacter();
- }
- case '{':
- return ParseJsonObject();
- case '[':
- return ParseJsonArray();
- default:
- return ReportUnexpectedCharacter();
+ if (c0_ == '"') return ParseJsonString();
+ if ((c0_ >= '0' && c0_ <= '9') || c0_ == '-') return ParseJsonNumber();
+ if (c0_ == '{') return ParseJsonObject();
+ if (c0_ == '[') return ParseJsonArray();
+ if (c0_ == 'f') {
+ if (AdvanceGetChar() == 'a' && AdvanceGetChar() == 'l' &&
+ AdvanceGetChar() == 's' && AdvanceGetChar() == 'e') {
+ AdvanceSkipWhitespace();
+ return factory()->false_value();
+ }
+ return ReportUnexpectedCharacter();
}
+ if (c0_ == 't') {
+ if (AdvanceGetChar() == 'r' && AdvanceGetChar() == 'u' &&
+ AdvanceGetChar() == 'e') {
+ AdvanceSkipWhitespace();
+ return factory()->true_value();
+ }
+ return ReportUnexpectedCharacter();
+ }
+ if (c0_ == 'n') {
+ if (AdvanceGetChar() == 'u' && AdvanceGetChar() == 'l' &&
+ AdvanceGetChar() == 'l') {
+ AdvanceSkipWhitespace();
+ return factory()->null_value();
+ }
+ return ReportUnexpectedCharacter();
+ }
+ return ReportUnexpectedCharacter();
}
// Parse a JSON object. Position must be right at '{'.
template <bool seq_ascii>
Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
- Handle<JSFunction> object_constructor(
- isolate()->native_context()->object_function());
+ Handle<Object> prototype;
Handle<JSObject> json_object =
- isolate()->factory()->NewJSObject(object_constructor);
+ factory()->NewJSObject(object_constructor());
ASSERT_EQ(c0_, '{');
AdvanceSkipWhitespace();
if (c0_ != '}') {
do {
if (c0_ != '"') return ReportUnexpectedCharacter();
- Handle<String> key = ParseJsonSymbol();
- if (key.is_null() || c0_ != ':') return ReportUnexpectedCharacter();
- AdvanceSkipWhitespace();
- Handle<Object> value = ParseJsonValue();
- if (value.is_null()) return ReportUnexpectedCharacter();
- uint32_t index;
- if (key->AsArrayIndex(&index)) {
+ int start_position = position_;
+ Advance();
+
+ uint32_t index = 0;
+ while (c0_ >= '0' && c0_ <= '9') {
+ int d = c0_ - '0';
+ if (index > 429496729U - ((d > 5) ? 1 : 0)) break;
+ index = (index * 10) + d;
+ Advance();
+ }
+
+ if (position_ != start_position + 1 && c0_ == '"') {
+ AdvanceSkipWhitespace();
+
+ if (c0_ != ':') return ReportUnexpectedCharacter();
+ AdvanceSkipWhitespace();
+ Handle<Object> value = ParseJsonValue();
+ if (value.is_null()) return ReportUnexpectedCharacter();
+
JSObject::SetOwnElement(json_object, index, value, kNonStrictMode);
- } else if (key->Equals(isolate()->heap()->Proto_symbol())) {
- SetPrototype(json_object, value);
} else {
- JSObject::SetLocalPropertyIgnoreAttributes(
- json_object, key, value, NONE);
+ position_ = start_position;
+#ifdef DEBUG
+ c0_ = '"';
+#endif
+
+ Handle<String> key = ParseJsonSymbol();
+ if (key.is_null() || c0_ != ':') return ReportUnexpectedCharacter();
+
+ AdvanceSkipWhitespace();
+ Handle<Object> value = ParseJsonValue();
+ if (value.is_null()) return ReportUnexpectedCharacter();
+
+ if (key->Equals(isolate()->heap()->Proto_symbol())) {
+ prototype = value;
+ } else {
+ if (JSObject::TryTransitionToField(json_object, key)) {
+ int index = json_object->LastAddedFieldIndex();
+ json_object->FastPropertyAtPut(index, *value);
+ } else {
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ json_object, key, value, NONE);
+ }
+ }
}
} while (MatchSkipWhiteSpace(','));
if (c0_ != '}') {
return ReportUnexpectedCharacter();
}
+ if (!prototype.is_null()) SetPrototype(json_object, prototype);
}
AdvanceSkipWhitespace();
return json_object;
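
The inlined key scan above recognizes keys that are pure array indices without first building a string. The guard index > 429496729U - ((d > 5) ? 1 : 0) bails out just before index * 10 + d could exceed the maximum uint32_t value 4294967295 (note 429496729 * 10 + 5 == 4294967295). A standalone sketch of that overflow check (hypothetical helper, not part of the parser):

  #include <stdint.h>

  // Appends one decimal digit to a uint32_t accumulator, returning false if
  // doing so would overflow: values above 429496729, or equal to it with a
  // digit greater than 5, cannot be multiplied by 10 and extended safely.
  static bool AppendDigit(uint32_t* index, char c) {
    uint32_t d = static_cast<uint32_t>(c - '0');
    if (*index > 429496729U - ((d > 5) ? 1 : 0)) return false;
    *index = *index * 10 + d;
    return true;
  }
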
@@ -344,11 +365,11 @@
AdvanceSkipWhitespace();
// Allocate a fixed array with all the elements.
Handle<FixedArray> fast_elements =
- isolate()->factory()->NewFixedArray(elements.length());
+ factory()->NewFixedArray(elements.length());
for (int i = 0, n = elements.length(); i < n; i++) {
fast_elements->set(i, *elements[i]);
}
- return isolate()->factory()->NewJSArrayWithElements(fast_elements);
+ return factory()->NewJSArrayWithElements(fast_elements);
}
@@ -415,7 +436,7 @@
buffer.Dispose();
}
SkipWhitespace();
- return isolate()->factory()->NewNumber(number);
+ return factory()->NewNumber(number);
}
@@ -456,8 +477,7 @@
int count = end - start;
int max_length = count + source_length_ - position_;
int length = Min(max_length, Max(kInitialSpecialStringLength, 2 * count));
- Handle<StringType> seq_str = NewRawString<StringType>(isolate()->factory(),
- length);
+ Handle<StringType> seq_str = NewRawString<StringType>(factory(), length);
// Copy prefix into seq_str.
SinkChar* dest = seq_str->GetChars();
String::WriteToFlat(*prefix, dest, start, end);
@@ -573,11 +593,14 @@
uc32 c0 = c0_;
do {
if (c0 == '\\') {
+ c0_ = c0;
+ int beg_pos = position_;
+ position_ = position;
return SlowScanJsonString<SeqAsciiString, char>(source_,
- position_,
- position);
+ beg_pos,
+ position_);
}
- if (c0_ < 0x20) return Handle<String>::null();
+ if (c0 < 0x20) return Handle<String>::null();
running_hash = StringHasher::AddCharacterCore(running_hash, c0);
position++;
if (position >= source_length_) return Handle<String>::null();
@@ -632,11 +655,11 @@
int length = position_ - beg_pos;
Handle<String> result;
if (seq_ascii && is_symbol) {
- result = isolate()->factory()->LookupAsciiSymbol(seq_source_,
+ result = factory()->LookupAsciiSymbol(seq_source_,
beg_pos,
length);
} else {
- result = isolate()->factory()->NewRawAsciiString(length);
+ result = factory()->NewRawAsciiString(length);
char* dest = SeqAsciiString::cast(*result)->GetChars();
String::WriteToFlat(*source_, dest, beg_pos, position_);
}
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 37fec4e..24730c6 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -861,11 +861,13 @@
void CodeFlusher::ProcessJSFunctionCandidates() {
Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
+ Object* undefined = isolate_->heap()->undefined_value();
JSFunction* candidate = jsfunction_candidates_head_;
JSFunction* next_candidate;
while (candidate != NULL) {
next_candidate = GetNextCandidate(candidate);
+ ClearNextCandidate(candidate, undefined);
SharedFunctionInfo* shared = candidate->shared();
@@ -874,8 +876,8 @@
if (!code_mark.Get()) {
shared->set_code(lazy_compile);
candidate->set_code(lazy_compile);
- } else {
- candidate->set_code(shared->code());
+ } else if (code == lazy_compile) {
+ candidate->set_code(lazy_compile);
}
// We are in the middle of a GC cycle so the write barrier in the code
@@ -904,7 +906,7 @@
SharedFunctionInfo* next_candidate;
while (candidate != NULL) {
next_candidate = GetNextCandidate(candidate);
- SetNextCandidate(candidate, NULL);
+ ClearNextCandidate(candidate);
Code* code = candidate->code();
MarkBit code_mark = Marking::MarkBitFrom(code);
@@ -2313,15 +2315,23 @@
void VisitEmbeddedPointer(RelocInfo* rinfo) {
ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
Object* target = rinfo->target_object();
+ Object* old_target = target;
VisitPointer(&target);
- rinfo->set_target_object(target);
+ // Avoid writes that would unnecessarily flush the instruction cache.
+ if (target != old_target) {
+ rinfo->set_target_object(target);
+ }
}
void VisitCodeTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ Object* old_target = target;
VisitPointer(&target);
- rinfo->set_target_address(Code::cast(target)->instruction_start());
+ if (target != old_target) {
+ rinfo->set_target_address(Code::cast(target)->instruction_start());
+ }
}
void VisitDebugTarget(RelocInfo* rinfo) {
diff --git a/src/mark-compact.h b/src/mark-compact.h
index 1d17582..7c64800 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -426,6 +426,7 @@
void AddCandidate(JSFunction* function) {
ASSERT(function->code() == function->shared()->code());
+ ASSERT(function->next_function_link()->IsUndefined());
SetNextCandidate(function, jsfunction_candidates_head_);
jsfunction_candidates_head_ = function;
}
@@ -439,30 +440,24 @@
void ProcessJSFunctionCandidates();
void ProcessSharedFunctionInfoCandidates();
- static JSFunction** GetNextCandidateField(JSFunction* candidate) {
- return reinterpret_cast<JSFunction**>(
- candidate->address() + JSFunction::kCodeEntryOffset);
- }
-
static JSFunction* GetNextCandidate(JSFunction* candidate) {
- return *GetNextCandidateField(candidate);
+ Object* next_candidate = candidate->next_function_link();
+ return reinterpret_cast<JSFunction*>(next_candidate);
}
static void SetNextCandidate(JSFunction* candidate,
JSFunction* next_candidate) {
- *GetNextCandidateField(candidate) = next_candidate;
+ candidate->set_next_function_link(next_candidate);
}
- static SharedFunctionInfo** GetNextCandidateField(
- SharedFunctionInfo* candidate) {
- Code* code = candidate->code();
- return reinterpret_cast<SharedFunctionInfo**>(
- code->address() + Code::kGCMetadataOffset);
+ static void ClearNextCandidate(JSFunction* candidate, Object* undefined) {
+ ASSERT(undefined->IsUndefined());
+ candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
}
static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
- return reinterpret_cast<SharedFunctionInfo*>(
- candidate->code()->gc_metadata());
+ Object* next_candidate = candidate->code()->gc_metadata();
+ return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
}
static void SetNextCandidate(SharedFunctionInfo* candidate,
@@ -470,6 +465,10 @@
candidate->code()->set_gc_metadata(next_candidate);
}
+ static void ClearNextCandidate(SharedFunctionInfo* candidate) {
+ candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
+ }
+
Isolate* isolate_;
JSFunction* jsfunction_candidates_head_;
SharedFunctionInfo* shared_function_info_candidates_head_;
diff --git a/src/messages.cc b/src/messages.cc
index a0793c2..23fd4fd 100644
--- a/src/messages.cc
+++ b/src/messages.cc
@@ -106,11 +106,20 @@
// We are calling into embedder's code which can throw exceptions.
// Thus we need to save current exception state, reset it to the clean one
// and ignore scheduled exceptions callbacks can throw.
+
+ // We pass the exception object into the message handler callback though.
+ Object* exception_object = isolate->heap()->undefined_value();
+ if (isolate->has_pending_exception()) {
+ isolate->pending_exception()->ToObject(&exception_object);
+ }
+ Handle<Object> exception_handle(exception_object);
+
Isolate::ExceptionScope exception_scope(isolate);
isolate->clear_pending_exception();
isolate->set_external_caught_exception(false);
v8::Local<v8::Message> api_message_obj = v8::Utils::MessageToLocal(message);
+ v8::Local<v8::Value> api_exception_obj = v8::Utils::ToLocal(exception_handle);
v8::NeanderArray global_listeners(FACTORY->message_listeners());
int global_length = global_listeners.length();
@@ -123,15 +132,13 @@
for (int i = 0; i < global_length; i++) {
HandleScope scope;
if (global_listeners.get(i)->IsUndefined()) continue;
- v8::NeanderObject listener(JSObject::cast(global_listeners.get(i)));
- Handle<Foreign> callback_obj(Foreign::cast(listener.get(0)));
+ Handle<Foreign> callback_obj(Foreign::cast(global_listeners.get(i)));
v8::MessageCallback callback =
FUNCTION_CAST<v8::MessageCallback>(callback_obj->foreign_address());
- Handle<Object> callback_data(listener.get(1));
{
// Do not allow exceptions to propagate.
v8::TryCatch try_catch;
- callback(api_message_obj, v8::Utils::ToLocal(callback_data));
+ callback(api_message_obj, api_exception_obj);
}
if (isolate->has_scheduled_exception()) {
isolate->clear_scheduled_exception();
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
index 522bc78..3e726a7 100644
--- a/src/mips/assembler-mips-inl.h
+++ b/src/mips/assembler-mips-inl.h
@@ -156,6 +156,11 @@
}
+Address Assembler::target_address_from_return_address(Address pc) {
+ return pc - kCallTargetAddressOffset;
+}
+
+
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index ee4daad..59c45c9 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -318,12 +318,15 @@
// Register aliases.
// cp is assumed to be a callee saved register.
-static const Register& kLithiumScratchReg = s3; // Scratch register.
-static const Register& kLithiumScratchReg2 = s4; // Scratch register.
-static const Register& kRootRegister = s6; // Roots array pointer.
-static const Register& cp = s7; // JavaScript context pointer.
-static const DoubleRegister& kLithiumScratchDouble = f30;
-static const FPURegister& kDoubleRegZero = f28;
+// Defined using #define instead of "static const Register&" because Clang
+// complains otherwise when a compilation unit that includes this header
+// doesn't use the variables.
+#define kRootRegister s6
+#define cp s7
+#define kLithiumScratchReg s3
+#define kLithiumScratchReg2 s4
+#define kLithiumScratchDouble f30
+#define kDoubleRegZero f28
// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
@@ -571,6 +574,10 @@
static Address target_address_at(Address pc);
static void set_target_address_at(Address pc, Address target);
+ // Return the code target address at a call site from the return address
+ // of that call in the instruction stream.
+ inline static Address target_address_from_return_address(Address pc);
+
static void JumpLabelToJumpRegister(Address pc);
static void QuietNaN(HeapObject* nan);
@@ -631,6 +638,8 @@
// register.
static const int kPcLoadDelta = 4;
+ static const int kPatchDebugBreakSlotReturnOffset = 4 * kInstrSize;
+
// Number of instructions used for the JS return sequence. The constant is
// used by the debugger to patch the JS return sequence.
static const int kJSReturnSequenceInstructions = 7;
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 0b551fb..ca31826 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -7633,6 +7633,16 @@
Label need_incremental;
Label need_incremental_pop_scratch;
+ __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
+ __ lw(regs_.scratch1(),
+ MemOperand(regs_.scratch0(),
+ MemoryChunk::kWriteBarrierCounterOffset));
+ __ Subu(regs_.scratch1(), regs_.scratch1(), Operand(1));
+ __ sw(regs_.scratch1(),
+ MemOperand(regs_.scratch0(),
+ MemoryChunk::kWriteBarrierCounterOffset));
+ __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
+
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
@@ -7757,7 +7767,9 @@
// Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, a2,
+ __ StoreNumberToDoubleElements(a0, a3, a1,
+ // Overwrites all regs after this.
+ t1, t2, t3, t5, a2,
&slow_elements);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
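The instructions added at the top of this stub implement a per-page write-barrier budget: the counter stored at MemoryChunk::kWriteBarrierCounterOffset is decremented each time the stub runs, and only once it goes negative does the code branch to the incremental-marking path. Restated as plain C++, an illustrative sketch of the logic only:

    // `counter` stands in for the word at MemoryChunk::kWriteBarrierCounterOffset
    // on the page containing the written-to object.
    static bool ShouldInformIncrementalMarker(long& counter) {
      counter -= 1;
      return counter < 0;  // mirrors Branch(&need_incremental, lt, scratch1, zero_reg)
    }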
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
index 371d120..9fd815b 100644
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -100,19 +100,7 @@
// ignore all slots that might have been recorded on it.
isolate->heap()->mark_compact_collector()->InvalidateCode(code);
- // Iterate over all the functions which share the same code object
- // and make them use unoptimized version.
- Context* context = function->context()->native_context();
- Object* element = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
- SharedFunctionInfo* shared = function->shared();
- while (!element->IsUndefined()) {
- JSFunction* func = JSFunction::cast(element);
- // Grab element before code replacement as ReplaceCode alters the list.
- element = func->next_function_link();
- if (func->code() == code) {
- func->ReplaceCode(shared->code());
- }
- }
+ ReplaceCodeForRelatedFunctions(function, code);
if (FLAG_trace_deopt) {
PrintF("[forced deoptimization: ");
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 4f09747..3e89fb4 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -1152,7 +1152,7 @@
__ EnumLength(a1, v0);
__ Branch(&no_descriptors, eq, a1, Operand(Smi::FromInt(0)));
- __ LoadInstanceDescriptors(v0, a2, t0);
+ __ LoadInstanceDescriptors(v0, a2);
__ lw(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheOffset));
__ lw(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheBridgeCacheOffset));
@@ -2712,7 +2712,7 @@
__ NumberOfOwnDescriptors(a3, a1);
__ Branch(&done, eq, a3, Operand(zero_reg));
- __ LoadInstanceDescriptors(a1, t0, a2);
+ __ LoadInstanceDescriptors(a1, t0);
// t0: descriptor array.
// a3: valid entries in the descriptor array.
STATIC_ASSERT(kSmiTag == 0);
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index 3f2ecb8..cf70681 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -1189,6 +1189,145 @@
}
+static void KeyedStoreGenerateGenericHelper(
+ MacroAssembler* masm,
+ Label* fast_object,
+ Label* fast_double,
+ Label* slow,
+ KeyedStoreCheckMap check_map,
+ KeyedStoreIncrementLength increment_length,
+ Register value,
+ Register key,
+ Register receiver,
+ Register receiver_map,
+ Register elements_map,
+ Register elements) {
+ Label transition_smi_elements;
+ Label finish_object_store, non_double_value, transition_double_elements;
+ Label fast_double_without_map_check;
+
+ // Fast case: Do the store, could be either Object or double.
+ __ bind(fast_object);
+ Register scratch_value = t0;
+ Register address = t1;
+ if (check_map == kCheckMap) {
+ __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ Branch(fast_double, ne, elements_map,
+ Operand(masm->isolate()->factory()->fixed_array_map()));
+ }
+ // Smi stores don't require further checks.
+ Label non_smi_value;
+ __ JumpIfNotSmi(value, &non_smi_value);
+
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
+ __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ }
+ // It's irrelevant whether array is smi-only or not when writing a smi.
+ __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(address, address, scratch_value);
+ __ sw(value, MemOperand(address));
+ __ Ret();
+
+ __ bind(&non_smi_value);
+ // Escape to elements kind transition case.
+ __ CheckFastObjectElements(receiver_map, scratch_value,
+ &transition_smi_elements);
+
+ // Fast elements array, store the value to the elements backing store.
+ __ bind(&finish_object_store);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
+ __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ }
+ __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(address, address, scratch_value);
+ __ sw(value, MemOperand(address));
+ // Update write barrier for the elements array address.
+ __ mov(scratch_value, value); // Preserve the value which is returned.
+ __ RecordWrite(elements,
+ address,
+ scratch_value,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ Ret();
+
+ __ bind(fast_double);
+ if (check_map == kCheckMap) {
+ // Check for fast double array case. If this fails, call through to the
+ // runtime.
+ __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
+ __ Branch(slow, ne, elements_map, Operand(at));
+ }
+ __ bind(&fast_double_without_map_check);
+ __ StoreNumberToDoubleElements(value,
+ key,
+ receiver,
+ elements, // Overwritten.
+ a3, // Scratch regs...
+ t0,
+ t1,
+ t2,
+ &transition_double_elements);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
+ __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ }
+ __ Ret();
+
+ __ bind(&transition_smi_elements);
+ // Transition the array appropriately depending on the value type.
+ __ lw(t0, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ __ Branch(&non_double_value, ne, t0, Operand(at));
+
+ // Value is a double. Transition FAST_SMI_ELEMENTS ->
+ // FAST_DOUBLE_ELEMENTS and complete the store.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS,
+ receiver_map,
+ t0,
+ slow);
+ ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, slow);
+ __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ jmp(&fast_double_without_map_check);
+
+ __ bind(&non_double_value);
+ // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_ELEMENTS,
+ receiver_map,
+ t0,
+ slow);
+ ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
+ __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
+
+ __ bind(&transition_double_elements);
+ // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+ // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
+ // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
+ FAST_ELEMENTS,
+ receiver_map,
+ t0,
+ slow);
+ ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, slow);
+ __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
+}
+
+
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
StrictModeFlag strict_mode) {
// ---------- S t a t e --------------
@@ -1197,11 +1336,9 @@
// -- a2 : receiver
// -- ra : return address
// -----------------------------------
- Label slow, array, extra, check_if_double_array;
- Label fast_object_with_map_check, fast_object_without_map_check;
- Label fast_double_with_map_check, fast_double_without_map_check;
- Label transition_smi_elements, finish_object_store, non_double_value;
- Label transition_double_elements;
+ Label slow, fast_object, fast_object_grow;
+ Label fast_double, fast_double_grow;
+ Label array, extra, check_if_double_array;
// Register usage.
Register value = a0;
@@ -1233,7 +1370,7 @@
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
// Check array bounds. Both the key and the length of FixedArray are smis.
__ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Branch(&fast_object_with_map_check, lo, key, Operand(t0));
+ __ Branch(&fast_object, lo, key, Operand(t0));
// Slow case, handle jump to runtime.
__ bind(&slow);
@@ -1258,19 +1395,11 @@
__ Branch(
&check_if_double_array, ne, elements_map, Heap::kFixedArrayMapRootIndex);
- // Calculate key + 1 as smi.
- STATIC_ASSERT(kSmiTag == 0);
- __ Addu(t0, key, Operand(Smi::FromInt(1)));
- __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Branch(&fast_object_without_map_check);
+ __ jmp(&fast_object_grow);
__ bind(&check_if_double_array);
__ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
- // Add 1 to key, and go to common element store code for doubles.
- STATIC_ASSERT(kSmiTag == 0);
- __ Addu(t0, key, Operand(Smi::FromInt(1)));
- __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ jmp(&fast_double_without_map_check);
+ __ jmp(&fast_double_grow);
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode (and writable); if it
@@ -1281,110 +1410,15 @@
// Check the key against the length in the array.
__ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ Branch(&extra, hs, key, Operand(t0));
- // Fall through to fast case.
- __ bind(&fast_object_with_map_check);
- Register scratch_value = t0;
- Register address = t1;
- __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ Branch(&fast_double_with_map_check,
- ne,
- elements_map,
- Heap::kFixedArrayMapRootIndex);
- __ bind(&fast_object_without_map_check);
- // Smi stores don't require further checks.
- Label non_smi_value;
- __ JumpIfNotSmi(value, &non_smi_value);
- // It's irrelevant whether array is smi-only or not when writing a smi.
- __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(address, address, scratch_value);
- __ sw(value, MemOperand(address));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, value);
-
- __ bind(&non_smi_value);
- // Escape to elements kind transition case.
- __ CheckFastObjectElements(receiver_map, scratch_value,
- &transition_smi_elements);
- // Fast elements array, store the value to the elements backing store.
- __ bind(&finish_object_store);
- __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(address, address, scratch_value);
- __ sw(value, MemOperand(address));
- // Update write barrier for the elements array address.
- __ mov(v0, value); // Preserve the value which is returned.
- __ RecordWrite(elements,
- address,
- value,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Ret();
-
- __ bind(&fast_double_with_map_check);
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
- __ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value,
- key,
- receiver,
- elements,
- a3,
- t0,
- t1,
- t2,
- &transition_double_elements);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, value);
-
- __ bind(&transition_smi_elements);
- // Transition the array appropriately depending on the value type.
- __ lw(t0, FieldMemOperand(value, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&non_double_value, ne, t0, Operand(at));
-
-
- // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
- // and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS,
- receiver_map,
- t0,
- &slow);
- ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, &slow);
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&fast_double_without_map_check);
-
- __ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- t0,
- &slow);
- ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-
- __ bind(&transition_double_elements);
- // Elements are double, but value is an Object that's not a HeapNumber. Make
- // sure that the receiver is a Array with Object elements and transition array
- // from double elements to Object elements.
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- t0,
- &slow);
- ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, &slow);
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
+ KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
+ &slow, kCheckMap, kDontIncrementLength,
+ value, key, receiver, receiver_map,
+ elements_map, elements);
+ KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
+ &slow, kDontCheckMap, kIncrementLength,
+ value, key, receiver, receiver_map,
+ elements_map, elements);
}
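The two calls above are the point of this hunk: the previously duplicated fast-store paths are folded into one helper parameterized by whether the elements map still needs checking and whether the array length must grow. Reduced to plain C++, the shape of the refactor is roughly the following (illustrative sketch; only the enum names mirror the port):

    enum KeyedStoreCheckMap { kDontCheckMap, kCheckMap };
    enum KeyedStoreIncrementLength { kDontIncrementLength, kIncrementLength };

    // One generator emits both the fast object and fast double store paths.
    static void GenerateFastPaths(KeyedStoreCheckMap check_map,
                                  KeyedStoreIncrementLength increment_length) {
      // ... emit smi/object/double store sequences, optionally bumping length ...
    }

    static void GenerateGenericShape() {
      GenerateFastPaths(kCheckMap, kDontIncrementLength);   // in-bounds store
      GenerateFastPaths(kDontCheckMap, kIncrementLength);   // store that grows the array
    }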
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 77d4b9b..4c2182b 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -5391,7 +5391,6 @@
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Register map = ToRegister(instr->map());
Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->scratch());
Label load_cache, done;
__ EnumLength(result, map);
__ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
@@ -5399,7 +5398,7 @@
__ jmp(&done);
__ bind(&load_cache);
- __ LoadInstanceDescriptors(map, result, scratch);
+ __ LoadInstanceDescriptors(map, result);
__ lw(result,
FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ lw(result,
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index 4015bff..0b6dcae 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -1829,12 +1829,11 @@
LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
HLoadKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
- Representation representation(instr->representation());
ASSERT(
- (representation.IsInteger32() &&
+ (instr->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (representation.IsDouble() &&
+ (instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->key()->representation().IsInteger32() ||
@@ -1897,13 +1896,12 @@
LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
HStoreKeyedSpecializedArrayElement* instr) {
- Representation representation(instr->value()->representation());
ElementsKind elements_kind = instr->elements_kind();
ASSERT(
- (representation.IsInteger32() &&
+ (instr->value()->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (representation.IsDouble() &&
+ (instr->value()->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->external_pointer()->representation().IsExternal());
@@ -2236,9 +2234,7 @@
LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
LOperand* map = UseRegister(instr->map());
- LOperand* scratch = TempRegister();
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LForInCacheArray(map, scratch)));
+ return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map)));
}
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index 939fc67..3a9aa7a 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -2379,15 +2379,13 @@
};
-class LForInCacheArray: public LTemplateInstruction<1, 1, 1> {
+class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LForInCacheArray(LOperand* map, LOperand* scratch) {
+ explicit LForInCacheArray(LOperand* map) {
inputs_[0] = map;
- temps_[0] = scratch;
}
LOperand* map() { return inputs_[0]; }
- LOperand* scratch() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index e88e5be..052387a 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -3445,7 +3445,7 @@
destination = FloatingPointHelper::kCoreRegisters;
}
- Register untagged_value = receiver_reg;
+ Register untagged_value = elements_reg;
SmiUntag(untagged_value, value_reg);
FloatingPointHelper::ConvertIntToDouble(this,
untagged_value,
@@ -5302,31 +5302,8 @@
void MacroAssembler::LoadInstanceDescriptors(Register map,
- Register descriptors,
- Register scratch) {
- Register temp = descriptors;
- lw(temp, FieldMemOperand(map, Map::kTransitionsOrBackPointerOffset));
-
- Label ok, fail, load_from_back_pointer;
- CheckMap(temp,
- scratch,
- isolate()->factory()->fixed_array_map(),
- &fail,
- DONT_DO_SMI_CHECK);
- lw(descriptors, FieldMemOperand(temp, TransitionArray::kDescriptorsOffset));
- jmp(&ok);
-
- bind(&fail);
- LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- Branch(&load_from_back_pointer, ne, temp, Operand(scratch));
- LoadRoot(descriptors, Heap::kEmptyDescriptorArrayRootIndex);
- jmp(&ok);
-
- bind(&load_from_back_pointer);
- lw(temp, FieldMemOperand(temp, Map::kTransitionsOrBackPointerOffset));
- lw(descriptors, FieldMemOperand(temp, TransitionArray::kDescriptorsOffset));
-
- bind(&ok);
+ Register descriptors) {
+ lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
}
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index ad3004a..b57e514 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -973,6 +973,7 @@
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
Register receiver_reg,
+ // All regs below here overwritten.
Register elements_reg,
Register scratch1,
Register scratch2,
@@ -1397,9 +1398,7 @@
DoubleRegister temp_double_reg);
- void LoadInstanceDescriptors(Register map,
- Register descriptors,
- Register scratch);
+ void LoadInstanceDescriptors(Register map, Register descriptors);
void EnumLength(Register dst, Register map);
void NumberOfOwnDescriptors(Register dst, Register map);
diff --git a/src/mips/regexp-macro-assembler-mips.cc b/src/mips/regexp-macro-assembler-mips.cc
index 21d1ce1..672ba0e 100644
--- a/src/mips/regexp-macro-assembler-mips.cc
+++ b/src/mips/regexp-macro-assembler-mips.cc
@@ -1103,6 +1103,11 @@
}
+bool RegExpMacroAssemblerMIPS::CanReadUnaligned() {
+ return false;
+}
+
+
// Private methods:
void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
diff --git a/src/mips/regexp-macro-assembler-mips.h b/src/mips/regexp-macro-assembler-mips.h
index 5446f52..8dd52a4 100644
--- a/src/mips/regexp-macro-assembler-mips.h
+++ b/src/mips/regexp-macro-assembler-mips.h
@@ -112,6 +112,7 @@
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
+ virtual bool CanReadUnaligned();
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 391f8e0..ba1d177 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -4748,6 +4748,7 @@
__ StoreNumberToDoubleElements(value_reg,
key_reg,
receiver_reg,
+ // All registers after this are overwritten.
elements_reg,
scratch1,
scratch2,
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 7e0ba9f..d2f996b 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1414,6 +1414,43 @@
}
+MaybeObject* JSObject::AddFastPropertyUsingMap(Map* map) {
+ ASSERT(this->map()->NumberOfOwnDescriptors() + 1 ==
+ map->NumberOfOwnDescriptors());
+ if (this->map()->unused_property_fields() == 0) {
+ int new_size = properties()->length() + map->unused_property_fields() + 1;
+ FixedArray* new_properties;
+ MaybeObject* maybe_properties = properties()->CopySize(new_size);
+ if (!maybe_properties->To(&new_properties)) return maybe_properties;
+ set_properties(new_properties);
+ }
+ set_map(map);
+ return this;
+}
+
+
+bool JSObject::TryTransitionToField(Handle<JSObject> object,
+ Handle<String> key) {
+ if (!object->map()->HasTransitionArray()) return false;
+ Handle<TransitionArray> transitions(object->map()->transitions());
+ int transition = transitions->Search(*key);
+ if (transition == TransitionArray::kNotFound) return false;
+ PropertyDetails target_details = transitions->GetTargetDetails(transition);
+ if (target_details.type() != FIELD) return false;
+ if (target_details.attributes() != NONE) return false;
+ Handle<Map> target(transitions->GetTarget(transition));
+ JSObject::AddFastPropertyUsingMap(object, target);
+ return true;
+}
+
+
+int JSObject::LastAddedFieldIndex() {
+ Map* map = this->map();
+ int last_added = map->LastAdded();
+ return map->instance_descriptors()->GetFieldIndex(last_added);
+}
+
+
ACCESSORS(Oddball, to_string, String, kToStringOffset)
ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
@@ -3548,32 +3585,16 @@
}
-DescriptorArray* Map::instance_descriptors() {
- if (HasTransitionArray()) return transitions()->descriptors();
- Object* back_pointer = GetBackPointer();
- if (!back_pointer->IsMap()) return GetHeap()->empty_descriptor_array();
- return Map::cast(back_pointer)->instance_descriptors();
-}
-
-
-enum TransitionsKind { DESCRIPTORS_HOLDER, FULL_TRANSITION_ARRAY };
-
-
// If the descriptor is using the empty transition array, install a new empty
// transition array that will have place for an element transition.
-static MaybeObject* EnsureHasTransitionArray(Map* map, TransitionsKind kind) {
+static MaybeObject* EnsureHasTransitionArray(Map* map) {
TransitionArray* transitions;
MaybeObject* maybe_transitions;
if (!map->HasTransitionArray()) {
- if (kind == FULL_TRANSITION_ARRAY) {
- maybe_transitions = TransitionArray::Allocate(0);
- } else {
- maybe_transitions = TransitionArray::AllocateDescriptorsHolder();
- }
+ maybe_transitions = TransitionArray::Allocate(0);
if (!maybe_transitions->To(&transitions)) return maybe_transitions;
transitions->set_back_pointer_storage(map->GetBackPointer());
- } else if (kind == FULL_TRANSITION_ARRAY &&
- !map->transitions()->IsFullTransitionArray()) {
+ } else if (!map->transitions()->IsFullTransitionArray()) {
maybe_transitions = map->transitions()->ExtendToFullTransitionArray();
if (!maybe_transitions->To(&transitions)) return maybe_transitions;
} else {
@@ -3584,19 +3605,7 @@
}
-MaybeObject* Map::SetDescriptors(DescriptorArray* value) {
- ASSERT(!is_shared());
- MaybeObject* maybe_failure =
- EnsureHasTransitionArray(this, DESCRIPTORS_HOLDER);
- if (maybe_failure->IsFailure()) return maybe_failure;
-
- ASSERT(NumberOfOwnDescriptors() <= value->number_of_descriptors());
- transitions()->set_descriptors(value);
- return this;
-}
-
-
-MaybeObject* Map::InitializeDescriptors(DescriptorArray* descriptors) {
+void Map::InitializeDescriptors(DescriptorArray* descriptors) {
int len = descriptors->number_of_descriptors();
#ifdef DEBUG
ASSERT(len <= DescriptorArray::kMaxNumberOfDescriptors);
@@ -3615,14 +3624,12 @@
}
#endif
- MaybeObject* maybe_failure = SetDescriptors(descriptors);
- if (maybe_failure->IsFailure()) return maybe_failure;
-
+ set_instance_descriptors(descriptors);
SetNumberOfOwnDescriptors(len);
- return this;
}
+ACCESSORS(Map, instance_descriptors, DescriptorArray, kDescriptorsOffset)
SMI_ACCESSORS(Map, bit_field3, kBitField3Offset)
@@ -3688,8 +3695,7 @@
Map* target,
SimpleTransitionFlag flag) {
if (HasTransitionArray()) return transitions()->CopyInsert(key, target);
- return TransitionArray::NewWith(
- flag, key, target, instance_descriptors(), GetBackPointer());
+ return TransitionArray::NewWith(flag, key, target, GetBackPointer());
}
@@ -3704,11 +3710,8 @@
MaybeObject* Map::set_elements_transition_map(Map* transitioned_map) {
- DescriptorArray* descriptors = instance_descriptors();
- MaybeObject* allow_elements =
- EnsureHasTransitionArray(this, FULL_TRANSITION_ARRAY);
+ MaybeObject* allow_elements = EnsureHasTransitionArray(this);
if (allow_elements->IsFailure()) return allow_elements;
- transitions()->set_descriptors(descriptors);
transitions()->set_elements_transition(transitioned_map);
return this;
}
@@ -3724,9 +3727,7 @@
MaybeObject* Map::SetPrototypeTransitions(FixedArray* proto_transitions) {
- DescriptorArray* descriptors = instance_descriptors();
- MaybeObject* allow_prototype =
- EnsureHasTransitionArray(this, FULL_TRANSITION_ARRAY);
+ MaybeObject* allow_prototype = EnsureHasTransitionArray(this);
if (allow_prototype->IsFailure()) return allow_prototype;
#ifdef DEBUG
if (HasPrototypeTransitions()) {
@@ -3734,7 +3735,6 @@
ZapPrototypeTransitions();
}
#endif
- transitions()->set_descriptors(descriptors);
transitions()->SetPrototypeTransitions(proto_transitions);
return this;
}
@@ -4955,7 +4955,7 @@
running_hash ^= (running_hash >> 11);
running_hash += (running_hash << 15);
if ((running_hash & String::kHashBitMask) == 0) {
- return 27;
+ return kZeroHash;
}
return running_hash;
}
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index d7727c1..b1118de 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -564,10 +564,9 @@
}
PrintF(out, " - back pointer: ");
GetBackPointer()->ShortPrint(out);
- PrintF(out, "\n - instance descriptors %i #%i %i: ",
+ PrintF(out, "\n - instance descriptors %i #%i: ",
owns_descriptors(),
- NumberOfOwnDescriptors(),
- StoresOwnDescriptors());
+ NumberOfOwnDescriptors());
instance_descriptors()->ShortPrint(out);
if (HasTransitionArray()) {
PrintF(out, "\n - transitions: ");
diff --git a/src/objects-visiting-inl.h b/src/objects-visiting-inl.h
index b704c1f..d698a8d 100644
--- a/src/objects-visiting-inl.h
+++ b/src/objects-visiting-inl.h
@@ -390,14 +390,6 @@
Heap* heap, TransitionArray* transitions) {
if (!StaticVisitor::MarkObjectWithoutPush(heap, transitions)) return;
- // Skip recording the descriptors_pointer slot since the cell space
- // is not compacted and descriptors are referenced through a cell.
- Object** descriptors_slot = transitions->GetDescriptorsSlot();
- HeapObject* descriptors = HeapObject::cast(*descriptors_slot);
- StaticVisitor::MarkObject(heap, descriptors);
- heap->mark_compact_collector()->RecordSlot(
- descriptors_slot, descriptors_slot, descriptors);
-
// Simple transitions do not have keys nor prototype transitions.
if (transitions->IsSimpleTransition()) return;
diff --git a/src/objects.cc b/src/objects.cc
index 7dcefa2..792b6d9 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1764,23 +1764,6 @@
Map* old_target = old_map->GetTransition(transition_index);
Object* result;
- // To sever a transition to a map with which the descriptors are shared, the
- // larger map (more descriptors) needs to store its own descriptors array.
- // Both sides of the severed chain need to have their own descriptors pointer
- // to store distinct descriptor arrays.
-
- // If the old_target did not yet store its own descriptors, the new
- // descriptors pointer is created for the old_target by temporarily clearing
- // the back pointer and setting its descriptor array.
-
- // This phase is executed before creating the new map since it requires
- // allocation that may fail.
- if (!old_target->StoresOwnDescriptors()) {
- DescriptorArray* old_descriptors = old_map->instance_descriptors();
- MaybeObject* maybe_failure = old_target->SetDescriptors(old_descriptors);
- if (maybe_failure->IsFailure()) return maybe_failure;
- }
-
MaybeObject* maybe_result =
ConvertDescriptorToField(name, new_value, attributes);
if (!maybe_result->To(&result)) return maybe_result;
@@ -1801,8 +1784,7 @@
old_target->instance_descriptors() == old_map->instance_descriptors()) {
// Since the conversion above generated a new fast map with an additional
// property which can be shared as well, install this descriptor pointer
- // along the entire chain of smaller maps; and remove the transition array
- // that is only in place to hold the descriptor array in the new map.
+ // along the entire chain of smaller maps.
Map* map;
DescriptorArray* new_descriptors = new_map->instance_descriptors();
DescriptorArray* old_descriptors = old_map->instance_descriptors();
@@ -1810,14 +1792,11 @@
!current->IsUndefined();
current = map->GetBackPointer()) {
map = Map::cast(current);
- if (!map->HasTransitionArray()) break;
- TransitionArray* transitions = map->transitions();
- if (transitions->descriptors() != old_descriptors) break;
+ if (map->instance_descriptors() != old_descriptors) break;
map->SetEnumLength(Map::kInvalidEnumCache);
- transitions->set_descriptors(new_descriptors);
+ map->set_instance_descriptors(new_descriptors);
}
old_map->set_owns_descriptors(false);
- new_map->ClearTransitions(GetHeap());
}
old_map->SetTransition(transition_index, new_map);
@@ -2201,7 +2180,7 @@
new_descriptors->CopyFrom(i, *descriptors, i, witness);
}
- Map::SetDescriptors(map, new_descriptors);
+ map->set_instance_descriptors(*new_descriptors);
}
@@ -2815,6 +2794,14 @@
}
+void JSObject::AddFastPropertyUsingMap(Handle<JSObject> object,
+ Handle<Map> map) {
+ CALL_HEAP_FUNCTION_VOID(
+ object->GetIsolate(),
+ object->AddFastPropertyUsingMap(*map));
+}
+
+
MaybeObject* JSObject::SetPropertyForResult(LookupResult* result,
String* name_raw,
Object* value_raw,
@@ -3591,7 +3578,6 @@
ASSERT(!IsJSGlobalProxy());
MaybeObject* hidden_lookup =
GetHiddenPropertiesHashTable(ONLY_RETURN_INLINE_VALUE);
- ASSERT(!hidden_lookup->IsFailure()); // No failure when passing false as arg.
Object* inline_value = hidden_lookup->ToObjectUnchecked();
if (inline_value->IsSmi()) {
@@ -3632,13 +3618,11 @@
return JSObject::cast(proxy_parent)->SetHiddenProperty(key, value);
}
ASSERT(!IsJSGlobalProxy());
-
- // If there is no backing store yet, store the identity hash inline.
MaybeObject* hidden_lookup =
GetHiddenPropertiesHashTable(ONLY_RETURN_INLINE_VALUE);
- ASSERT(!hidden_lookup->IsFailure());
Object* inline_value = hidden_lookup->ToObjectUnchecked();
+ // If there is no backing store yet, store the identity hash inline.
if (value->IsSmi() &&
key == GetHeap()->identity_hash_symbol() &&
(inline_value->IsUndefined() || inline_value->IsSmi())) {
@@ -3675,15 +3659,16 @@
JSObject::cast(proxy_parent)->DeleteHiddenProperty(key);
return;
}
+ ASSERT(!IsJSGlobalProxy());
MaybeObject* hidden_lookup =
GetHiddenPropertiesHashTable(ONLY_RETURN_INLINE_VALUE);
- ASSERT(!hidden_lookup->IsFailure()); // No failure when passing false as arg.
- if (hidden_lookup->ToObjectUnchecked()->IsUndefined()) return;
- // We never delete (inline-stored) identity hashes.
- ASSERT(!hidden_lookup->ToObjectUnchecked()->IsSmi());
+ Object* inline_value = hidden_lookup->ToObjectUnchecked();
- ObjectHashTable* hashtable =
- ObjectHashTable::cast(hidden_lookup->ToObjectUnchecked());
+ // We never delete (inline-stored) identity hashes.
+ ASSERT(key != GetHeap()->identity_hash_symbol());
+ if (inline_value->IsUndefined() || inline_value->IsSmi()) return;
+
+ ObjectHashTable* hashtable = ObjectHashTable::cast(inline_value);
MaybeObject* delete_result = hashtable->Put(key, GetHeap()->the_hole_value());
USE(delete_result);
ASSERT(!delete_result->IsFailure()); // Delete does not cause GC.
@@ -3904,6 +3889,21 @@
return isolate->heap()->false_value();
}
+ if (IsStringObjectWithCharacterAt(index)) {
+ if (mode == STRICT_DELETION) {
+ // Deleting a non-configurable property in strict mode.
+ HandleScope scope(isolate);
+ Handle<Object> holder(this);
+ Handle<Object> name = isolate->factory()->NewNumberFromUint(index);
+ Handle<Object> args[2] = { name, holder };
+ Handle<Object> error =
+ isolate->factory()->NewTypeError("strict_delete_property",
+ HandleVector(args, 2));
+ return isolate->Throw(*error);
+ }
+ return isolate->heap()->false_value();
+ }
+
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
if (proto->IsNull()) return isolate->heap()->false_value();
@@ -4214,13 +4214,6 @@
}
-void Map::SetDescriptors(Handle<Map> map,
- Handle<DescriptorArray> descriptors) {
- Isolate* isolate = map->GetIsolate();
- CALL_HEAP_FUNCTION_VOID(isolate, map->SetDescriptors(*descriptors));
-}
-
-
int Map::NumberOfDescribedProperties(DescriptorFlag which,
PropertyAttributes filter) {
int result = 0;
@@ -4998,37 +4991,37 @@
new_descriptors->Append(descriptor, witness);
- // If the source descriptors had an enum cache we copy it. This ensures that
- // the maps to which we push the new descriptor array back can rely on a
- // cache always being available once it is set. If the map has more
- // enumerated descriptors than available in the original cache, the cache
- // will be lazily replaced by the extended cache when needed.
- if (descriptors->HasEnumCache()) {
- new_descriptors->CopyEnumCacheFrom(descriptors);
- }
+ if (old_size > 0) {
+ // If the source descriptors had an enum cache we copy it. This ensures
+ // that the maps to which we push the new descriptor array back can rely
+ // on a cache always being available once it is set. If the map has more
+ // enumerated descriptors than available in the original cache, the cache
+ // will be lazily replaced by the extended cache when needed.
+ if (descriptors->HasEnumCache()) {
+ new_descriptors->CopyEnumCacheFrom(descriptors);
+ }
- Map* map;
- // Replace descriptors by new_descriptors in all maps that share it.
- for (Object* current = GetBackPointer();
- !current->IsUndefined();
- current = map->GetBackPointer()) {
- map = Map::cast(current);
- if (!map->HasTransitionArray()) break;
- TransitionArray* transitions = map->transitions();
- if (transitions->descriptors() != descriptors) break;
- transitions->set_descriptors(new_descriptors);
- }
+ Map* map;
+ // Replace descriptors by new_descriptors in all maps that share it.
+ for (Object* current = GetBackPointer();
+ !current->IsUndefined();
+ current = map->GetBackPointer()) {
+ map = Map::cast(current);
+ if (map->instance_descriptors() != descriptors) break;
+ map->set_instance_descriptors(new_descriptors);
+ }
- transitions->set_descriptors(new_descriptors);
+ set_instance_descriptors(new_descriptors);
+ }
}
- set_transitions(transitions);
result->SetBackPointer(this);
- set_owns_descriptors(false);
-
- result->SetNumberOfOwnDescriptors(new_descriptors->number_of_descriptors());
+ result->InitializeDescriptors(new_descriptors);
ASSERT(result->NumberOfOwnDescriptors() == NumberOfOwnDescriptors() + 1);
+ set_transitions(transitions);
+ set_owns_descriptors(false);
+
return result;
}
@@ -5043,13 +5036,7 @@
MaybeObject* maybe_result = CopyDropDescriptors();
if (!maybe_result->To(&result)) return maybe_result;
- // Unless we are creating a map with no descriptors and no back pointer, we
- // insert the descriptor array locally.
- if (!descriptors->IsEmpty()) {
- MaybeObject* maybe_failure = result->SetDescriptors(descriptors);
- if (maybe_failure->IsFailure()) return maybe_failure;
- result->SetNumberOfOwnDescriptors(descriptors->number_of_descriptors());
- }
+ result->InitializeDescriptors(descriptors);
if (flag == INSERT_TRANSITION && CanHaveMoreTransitions()) {
TransitionArray* transitions;
@@ -5060,23 +5047,6 @@
MaybeObject* maybe_transitions = AddTransition(name, result, simple_flag);
if (!maybe_transitions->To(&transitions)) return maybe_transitions;
- if (descriptors->IsEmpty()) {
- if (owns_descriptors()) {
- // If the copied map has no added fields, and the parent map owns its
- // descriptors, those descriptors have to be empty. In that case,
- // transfer ownership of the descriptors to the new child.
- ASSERT(instance_descriptors()->IsEmpty());
- set_owns_descriptors(false);
- } else {
- // If the parent did not own its own descriptors, it may share a larger
- // descriptors array already. In that case, force a split by setting
- // the descriptor array of the new map to the empty descriptor array.
- MaybeObject* maybe_failure =
- result->SetDescriptors(GetHeap()->empty_descriptor_array());
- if (maybe_failure->IsFailure()) return maybe_failure;
- }
- }
-
set_transitions(transitions);
result->SetBackPointer(this);
}
@@ -5098,7 +5068,10 @@
ASSERT(kind != elements_kind());
}
- if (flag == INSERT_TRANSITION && owns_descriptors()) {
+ bool insert_transition =
+ flag == INSERT_TRANSITION && !HasElementsTransition();
+
+ if (insert_transition && owns_descriptors()) {
// In case the map owned its own descriptors, share the descriptors and
// transfer ownership to the new map.
Map* new_map;
@@ -5109,8 +5082,8 @@
if (added_elements->IsFailure()) return added_elements;
new_map->set_elements_kind(kind);
+ new_map->InitializeDescriptors(instance_descriptors());
new_map->SetBackPointer(this);
- new_map->SetNumberOfOwnDescriptors(NumberOfOwnDescriptors());
set_owns_descriptors(false);
return new_map;
}
@@ -5121,24 +5094,12 @@
Map* new_map;
MaybeObject* maybe_new_map = Copy();
if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- ASSERT(new_map->NumberOfOwnDescriptors() == NumberOfOwnDescriptors());
+
new_map->set_elements_kind(kind);
- if (flag == INSERT_TRANSITION && !HasElementsTransition()) {
- // Map::Copy does not store the descriptor array in case it is empty, since
- // it does not insert a back pointer; implicitly indicating that its
- // descriptor array is empty. Since in this case we do want to insert a back
- // pointer, we have to manually set the empty descriptor array to force a
- // split.
- if (!new_map->StoresOwnDescriptors()) {
- ASSERT(new_map->NumberOfOwnDescriptors() == 0);
- MaybeObject* maybe_failure =
- new_map->SetDescriptors(GetHeap()->empty_descriptor_array());
- if (maybe_failure->IsFailure()) return maybe_failure;
- }
+ if (insert_transition) {
MaybeObject* added_elements = set_elements_transition_map(new_map);
if (added_elements->IsFailure()) return added_elements;
-
new_map->SetBackPointer(this);
}
@@ -7452,14 +7413,8 @@
// Clear a possible back pointer in case the transition leads to a dead map.
// Return true in case a back pointer has been cleared and false otherwise.
-static bool ClearBackPointer(Heap* heap,
- Map* target,
- DescriptorArray* descriptors,
- bool* descriptors_owner_died) {
+static bool ClearBackPointer(Heap* heap, Map* target) {
if (Marking::MarkBitFrom(target).Get()) return false;
- if (target->instance_descriptors() == descriptors) {
- *descriptors_owner_died = true;
- }
target->SetBackPointer(heap->undefined_value(), SKIP_WRITE_BARRIER);
return true;
}
@@ -7479,13 +7434,18 @@
int transition_index = 0;
- DescriptorArray* descriptors = t->descriptors();
+ DescriptorArray* descriptors = instance_descriptors();
bool descriptors_owner_died = false;
// Compact all live descriptors to the left.
for (int i = 0; i < t->number_of_transitions(); ++i) {
Map* target = t->GetTarget(i);
- if (!ClearBackPointer(heap, target, descriptors, &descriptors_owner_died)) {
+ if (ClearBackPointer(heap, target)) {
+ if (target->instance_descriptors() == descriptors) {
+ descriptors_owner_died = true;
+ }
+ } else {
if (i != transition_index) {
String* key = t->GetKey(i);
t->SetKey(transition_index, key);
@@ -7499,10 +7459,10 @@
}
if (t->HasElementsTransition() &&
- ClearBackPointer(heap,
- t->elements_transition(),
- descriptors,
- &descriptors_owner_died)) {
+ ClearBackPointer(heap, t->elements_transition())) {
+ if (t->elements_transition()->instance_descriptors() == descriptors) {
+ descriptors_owner_died = true;
+ }
t->ClearElementsTransition();
} else {
// If there are no transitions to be cleared, return.
@@ -7518,7 +7478,7 @@
TrimDescriptorArray(heap, this, descriptors, number_of_own_descriptors);
ASSERT(descriptors->number_of_descriptors() == number_of_own_descriptors);
} else {
- t->set_descriptors(heap->empty_descriptor_array());
+ ASSERT(descriptors == GetHeap()->empty_descriptor_array());
}
}
@@ -12121,7 +12081,7 @@
hash += hash << 3;
hash ^= hash >> 11;
hash += hash << 15;
- if ((hash & String::kHashBitMask) == 0) hash = String::kZeroHash;
+ if ((hash & String::kHashBitMask) == 0) hash = StringHasher::kZeroHash;
#ifdef DEBUG
StringHasher hasher(2, seed);
hasher.AddCharacter(c1);
@@ -13000,8 +12960,7 @@
descriptors->Sort();
- MaybeObject* maybe_failure = new_map->InitializeDescriptors(descriptors);
- if (maybe_failure->IsFailure()) return maybe_failure;
+ new_map->InitializeDescriptors(descriptors);
new_map->set_unused_property_fields(unused_property_fields);
// Transform the object.
diff --git a/src/objects.h b/src/objects.h
index edc85de..0d1a69c 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1621,6 +1621,18 @@
Handle<Object> value,
PropertyAttributes attributes);
+ // Try to follow an existing transition to a field with attributes NONE. The
+ // return value indicates whether the transition was successful.
+ static inline bool TryTransitionToField(Handle<JSObject> object,
+ Handle<String> key);
+
+ inline int LastAddedFieldIndex();
+
+ // Extend the receiver with a single fast property that first appears in the
+ // passed map. This also extends the property backing store if necessary.
+ static void AddFastPropertyUsingMap(Handle<JSObject> object, Handle<Map> map);
+ inline MUST_USE_RESULT MaybeObject* AddFastPropertyUsingMap(Map* map);
+
// Can cause GC.
MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributes(
String* key,
@@ -2456,11 +2468,12 @@
// DescriptorArrays are fixed arrays used to hold instance descriptors.
// The format of the these objects is:
-// [0]: Either Smi(0) if uninitialized, or a pointer to small fixed array:
+// [0]: Number of descriptors
+// [1]: Either Smi(0) if uninitialized, or a pointer to small fixed array:
// [0]: pointer to fixed array with enum cache
// [1]: either Smi(0) or pointer to fixed array with indices
-// [1]: first key
-// [length() - kDescriptorSize]: last key
+// [2]: first key
+// [2 + number of descriptors * kDescriptorSize]: start of slack
class DescriptorArray: public FixedArray {
public:
// WhitenessWitness is used to prove that a descriptor array is white
@@ -4808,7 +4821,6 @@
static bool IsValidElementsTransition(ElementsKind from_kind,
ElementsKind to_kind);
- bool StoresOwnDescriptors() { return HasTransitionArray(); }
inline bool HasTransitionArray();
inline bool HasElementsTransition();
inline Map* elements_transition_map();
@@ -4856,13 +4868,8 @@
inline JSFunction* unchecked_constructor();
// [instance descriptors]: describes the object.
- inline DescriptorArray* instance_descriptors();
- MUST_USE_RESULT inline MaybeObject* SetDescriptors(
- DescriptorArray* descriptors);
- static void SetDescriptors(Handle<Map> map,
- Handle<DescriptorArray> descriptors);
- MUST_USE_RESULT inline MaybeObject* InitializeDescriptors(
- DescriptorArray* descriptors);
+ DECL_ACCESSORS(instance_descriptors, DescriptorArray)
+ inline void InitializeDescriptors(DescriptorArray* descriptors);
// [stub cache]: contains stubs compiled for this map.
DECL_ACCESSORS(code_cache, Object)
@@ -5124,11 +5131,12 @@
// indirection.
static const int kTransitionsOrBackPointerOffset =
kConstructorOffset + kPointerSize;
- static const int kCodeCacheOffset =
+ static const int kDescriptorsOffset =
kTransitionsOrBackPointerOffset + kPointerSize;
+ static const int kCodeCacheOffset =
+ kDescriptorsOffset + kPointerSize;
static const int kBitField3Offset = kCodeCacheOffset + kPointerSize;
- static const int kPadStart = kBitField3Offset + kPointerSize;
- static const int kSize = MAP_POINTER_ALIGN(kPadStart);
+ static const int kSize = kBitField3Offset + kPointerSize;
// Layout of pointer fields. Heap iteration code relies on them
// being continuously allocated.
@@ -7372,7 +7380,7 @@
kIsNotArrayIndexMask | kHashNotComputedMask;
// Value of hash field containing computed hash equal to zero.
- static const int kZeroHash = kIsNotArrayIndexMask;
+ static const int kEmptyStringHash = kIsNotArrayIndexMask;
// Maximal string length.
static const int kMaxLength = (1 << (32 - 2)) - 1;
@@ -7407,32 +7415,47 @@
int from,
int to);
- static inline bool IsAscii(const char* chars, int length) {
+ // The return value may point to the first aligned word containing the
+ // first non-ascii character, rather than directly to the non-ascii character.
+ // If the return value is >= the passed length, the entire string was ASCII.
+ static inline int NonAsciiStart(const char* chars, int length) {
+ const char* start = chars;
const char* limit = chars + length;
#ifdef V8_HOST_CAN_READ_UNALIGNED
ASSERT(kMaxAsciiCharCode == 0x7F);
const uintptr_t non_ascii_mask = kUintptrAllBitsSet / 0xFF * 0x80;
while (chars + sizeof(uintptr_t) <= limit) {
if (*reinterpret_cast<const uintptr_t*>(chars) & non_ascii_mask) {
- return false;
+ return static_cast<int>(chars - start);
}
chars += sizeof(uintptr_t);
}
#endif
while (chars < limit) {
- if (static_cast<uint8_t>(*chars) > kMaxAsciiCharCodeU) return false;
+ if (static_cast<uint8_t>(*chars) > kMaxAsciiCharCodeU) {
+ return static_cast<int>(chars - start);
+ }
++chars;
}
- return true;
+ return static_cast<int>(chars - start);
+ }
+
+ static inline bool IsAscii(const char* chars, int length) {
+ return NonAsciiStart(chars, length) >= length;
+ }
+
+ static inline int NonAsciiStart(const uc16* chars, int length) {
+ const uc16* limit = chars + length;
+ const uc16* start = chars;
+ while (chars < limit) {
+ if (*chars > kMaxAsciiCharCodeU) return static_cast<int>(chars - start);
+ ++chars;
+ }
+ return static_cast<int>(chars - start);
}
static inline bool IsAscii(const uc16* chars, int length) {
- const uc16* limit = chars + length;
- while (chars < limit) {
- if (*chars > kMaxAsciiCharCodeU) return false;
- ++chars;
- }
- return true;
+ return NonAsciiStart(chars, length) >= length;
}
protected:
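A standalone, compilable sketch of the word-at-a-time scan that the new NonAsciiStart performs: a mask with bit 7 set in every byte flags any non-ASCII byte in a whole word at once. This version uses memcpy instead of the V8_HOST_CAN_READ_UNALIGNED cast, so it is portable but only an approximation of the code above:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static int NonAsciiStart(const char* chars, int length) {
      const char* start = chars;
      const char* limit = chars + length;
      // 0x80 repeated in every byte of a word; any hit marks a non-ASCII byte.
      const uintptr_t non_ascii_mask = UINTPTR_MAX / 0xFF * 0x80;
      while (chars + sizeof(uintptr_t) <= limit) {
        uintptr_t word;
        std::memcpy(&word, chars, sizeof(word));
        if (word & non_ascii_mask) return static_cast<int>(chars - start);
        chars += sizeof(uintptr_t);
      }
      while (chars < limit) {
        if (static_cast<unsigned char>(*chars) > 0x7F) {
          return static_cast<int>(chars - start);
        }
        ++chars;
      }
      return static_cast<int>(chars - start);  // >= length means all ASCII
    }

    int main() {
      const char* s = "plain ASCII then \xC3\xA9";  // ends in two non-ASCII bytes
      int n = static_cast<int>(std::strlen(s));
      int i = NonAsciiStart(s, n);
      std::printf("first non-ASCII at or before byte %d of %d\n", i, n);
      return 0;
    }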
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
index b39dfc0..089ea38 100644
--- a/src/platform-cygwin.cc
+++ b/src/platform-cygwin.cc
@@ -692,8 +692,9 @@
CONTEXT context;
memset(&context, 0, sizeof(context));
- TickSample* sample = CpuProfiler::StartTickSampleEvent(sampler->isolate());
- if (sample == NULL) return;
+ TickSample sample_obj;
+ TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
+ if (sample == NULL) sample = &sample_obj;
static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
if (SuspendThread(profiled_thread) == kSuspendFailed) return;
@@ -713,7 +714,6 @@
sampler->SampleStack(sample);
sampler->Tick(sample);
}
- CpuProfiler::FinishTickSampleEvent(sampler->isolate());
ResumeThread(profiled_thread);
}
@@ -768,11 +768,6 @@
}
-void Sampler::DoSample() {
- // TODO(rogulenko): implement
-}
-
-
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
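Across these platform files the same change recurs: SampleContext no longer bails out when the profiler cannot hand out a buffer slot, it falls back to a stack-local TickSample so the tick still happens. The pattern in isolation, with placeholder names rather than the real profiler API:

    #include <cstddef>

    struct TickSample { void* pc; void* sp; void* fp; };

    // Stand-in for CpuProfiler::TickSampleEvent(): assumed to return NULL
    // whenever the circular sample buffer is not recording.
    static TickSample* TickSampleEvent() { return NULL; }

    static void SampleOnce() {
      TickSample sample_obj;                      // stack-local fallback
      TickSample* sample = TickSampleEvent();
      if (sample == NULL) sample = &sample_obj;   // keep ticking, just don't record
      // ... fill *sample from the suspended thread's context, then Tick(sample) ...
    }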
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 6d1bccd..511759c 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -678,8 +678,9 @@
Sampler* sampler = isolate->logger()->sampler();
if (sampler == NULL || !sampler->IsActive()) return;
- TickSample* sample = CpuProfiler::StartTickSampleEvent(isolate);
- if (sample == NULL) return;
+ TickSample sample_obj;
+ TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
+ if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
@@ -700,7 +701,6 @@
#endif
sampler->SampleStack(sample);
sampler->Tick(sample);
- CpuProfiler::FinishTickSampleEvent(isolate);
}
@@ -884,11 +884,6 @@
}
-void Sampler::DoSample() {
- // TODO(rogulenko): implement
-}
-
-
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index fe366c9..beb2cce 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -174,6 +174,24 @@
}
+CpuImplementer OS::GetCpuImplementer() {
+ static bool use_cached_value = false;
+ static CpuImplementer cached_value = UNKNOWN_IMPLEMENTER;
+ if (use_cached_value) {
+ return cached_value;
+ }
+ if (CPUInfoContainsString("CPU implementer\t: 0x41")) {
+ cached_value = ARM_IMPLEMENTER;
+ } else if (CPUInfoContainsString("CPU implementer\t: 0x51")) {
+ cached_value = QUALCOMM_IMPLEMENTER;
+ } else {
+ cached_value = UNKNOWN_IMPLEMENTER;
+ }
+ use_cached_value = true;
+ return cached_value;
+}
+
+
bool OS::ArmUsingHardFloat() {
// GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
// the Floating Point ABI used (PCS stands for Procedure Call Standard).
@@ -1015,8 +1033,9 @@
Sampler* sampler = isolate->logger()->sampler();
if (sampler == NULL || !sampler->IsActive()) return;
- TickSample* sample = CpuProfiler::StartTickSampleEvent(isolate);
- if (sample == NULL) return;
+ TickSample sample_obj;
+ TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
+ if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
@@ -1051,74 +1070,16 @@
#endif // V8_HOST_ARCH_*
sampler->SampleStack(sample);
sampler->Tick(sample);
- CpuProfiler::FinishTickSampleEvent(isolate);
}
-class CpuProfilerSignalHandler {
- public:
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
-
- static void InstallSignalHandler() {
- struct sigaction sa;
- ScopedLock lock(mutex_);
- if (signal_handler_installed_counter_ > 0) {
- signal_handler_installed_counter_++;
- return;
- }
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
- if (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0) {
- signal_handler_installed_counter_++;
- }
- }
-
- static void RestoreSignalHandler() {
- ScopedLock lock(mutex_);
- if (signal_handler_installed_counter_ == 0)
- return;
- if (signal_handler_installed_counter_ == 1) {
- sigaction(SIGPROF, &old_signal_handler_, 0);
- }
- signal_handler_installed_counter_--;
- }
-
- static bool signal_handler_installed() {
- return signal_handler_installed_counter_ > 0;
- }
-
- private:
- static int signal_handler_installed_counter_;
- static struct sigaction old_signal_handler_;
- static Mutex* mutex_;
-};
-
-
-int CpuProfilerSignalHandler::signal_handler_installed_counter_ = 0;
-struct sigaction CpuProfilerSignalHandler::old_signal_handler_;
-Mutex* CpuProfilerSignalHandler::mutex_ = NULL;
-
-
class Sampler::PlatformData : public Malloced {
public:
- PlatformData()
- : vm_tgid_(getpid()),
- vm_tid_(GetThreadID()) {}
+ PlatformData() : vm_tid_(GetThreadID()) {}
- void SendProfilingSignal() {
- if (!CpuProfilerSignalHandler::signal_handler_installed()) return;
- // Glibc doesn't provide a wrapper for tgkill(2).
-#if defined(ANDROID)
- syscall(__NR_tgkill, vm_tgid_, vm_tid_, SIGPROF);
-#else
- syscall(SYS_tgkill, vm_tgid_, vm_tid_, SIGPROF);
-#endif
- }
+ int vm_tid() const { return vm_tid_; }
private:
- const int vm_tgid_;
const int vm_tid_;
};
@@ -1134,11 +1095,28 @@
explicit SignalSender(int interval)
: Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
+ vm_tgid_(getpid()),
interval_(interval) {}
static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
static void TearDown() { delete mutex_; }
+ static void InstallSignalHandler() {
+ struct sigaction sa;
+ sa.sa_sigaction = ProfilerSignalHandler;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ signal_handler_installed_ =
+ (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
+ }
+
+ static void RestoreSignalHandler() {
+ if (signal_handler_installed_) {
+ sigaction(SIGPROF, &old_signal_handler_, 0);
+ signal_handler_installed_ = false;
+ }
+ }
+
static void AddActiveSampler(Sampler* sampler) {
ScopedLock lock(mutex_);
SamplerRegistry::AddActiveSampler(sampler);
@@ -1159,6 +1137,7 @@
RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
delete instance_;
instance_ = NULL;
+ RestoreSignalHandler();
}
}
@@ -1167,21 +1146,67 @@
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- if (RuntimeProfiler::IsEnabled()) {
+ bool cpu_profiling_enabled =
+ (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
+ bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
+ if (cpu_profiling_enabled && !signal_handler_installed_) {
+ InstallSignalHandler();
+ } else if (!cpu_profiling_enabled && signal_handler_installed_) {
+ RestoreSignalHandler();
+ }
+ // When CPU profiling is enabled both JavaScript and C++ code is
+ // profiled. We must not suspend.
+ if (!cpu_profiling_enabled) {
+ if (rate_limiter_.SuspendIfNecessary()) continue;
+ }
+ if (cpu_profiling_enabled && runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
+ return;
+ }
+ Sleep(HALF_INTERVAL);
if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
return;
}
+ Sleep(HALF_INTERVAL);
+ } else {
+ if (cpu_profiling_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
+ this)) {
+ return;
+ }
+ }
+ if (runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
+ NULL)) {
+ return;
+ }
+ }
+ Sleep(FULL_INTERVAL);
}
- Sleep(FULL_INTERVAL);
}
}
+ static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
+ if (!sampler->IsProfiling()) return;
+ SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
+ sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
+ }
+
static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
if (!sampler->isolate()->IsInitialized()) return;
sampler->isolate()->runtime_profiler()->NotifyTick();
}
+ void SendProfilingSignal(int tid) {
+ if (!signal_handler_installed_) return;
+ // Glibc doesn't provide a wrapper for tgkill(2).
+#if defined(ANDROID)
+ syscall(__NR_tgkill, vm_tgid_, tid, SIGPROF);
+#else
+ syscall(SYS_tgkill, vm_tgid_, tid, SIGPROF);
+#endif
+ }
+
void Sleep(SleepInterval full_or_half) {
// Convert ms to us and subtract 100 us to compensate delays
// occurring during signal delivery.
@@ -1204,12 +1229,15 @@
#endif // ANDROID
}
+ const int vm_tgid_;
const int interval_;
RuntimeProfilerRateLimiter rate_limiter_;
// Protects the process wide state below.
static Mutex* mutex_;
static SignalSender* instance_;
+ static bool signal_handler_installed_;
+ static struct sigaction old_signal_handler_;
private:
DISALLOW_COPY_AND_ASSIGN(SignalSender);
@@ -1218,6 +1246,8 @@
Mutex* SignalSender::mutex_ = NULL;
SignalSender* SignalSender::instance_ = NULL;
+struct sigaction SignalSender::old_signal_handler_;
+bool SignalSender::signal_handler_installed_ = false;
void OS::SetUp() {
@@ -1245,13 +1275,11 @@
}
#endif
SignalSender::SetUp();
- CpuProfilerSignalHandler::SetUp();
}
void OS::TearDown() {
SignalSender::TearDown();
- CpuProfilerSignalHandler::TearDown();
delete limit_mutex;
}
@@ -1272,14 +1300,8 @@
}
-void Sampler::DoSample() {
- platform_data()->SendProfilingSignal();
-}
-
-
void Sampler::Start() {
ASSERT(!IsActive());
- CpuProfilerSignalHandler::InstallSignalHandler();
SetActive(true);
SignalSender::AddActiveSampler(this);
}
@@ -1287,7 +1309,6 @@
void Sampler::Stop() {
ASSERT(IsActive());
- CpuProfilerSignalHandler::RestoreSignalHandler();
SignalSender::RemoveActiveSampler(this);
SetActive(false);
}
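
A minimal standalone sketch (not part of the patch) of the tgkill-based SIGPROF delivery the SignalSender changes above rely on. The names mirror the patch but are illustrative stand-ins for the real V8 code; error handling and the actual TickSample recording are omitted.

#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <cstdio>
#include <cstring>

static struct sigaction old_signal_handler;
static bool signal_handler_installed = false;

// Stand-in for the profiler signal handler; V8 records a TickSample here.
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
  (void) signal; (void) info; (void) context;
}

static void InstallSignalHandler() {
  struct sigaction sa;
  memset(&sa, 0, sizeof(sa));
  sa.sa_sigaction = ProfilerSignalHandler;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_RESTART | SA_SIGINFO;
  signal_handler_installed =
      (sigaction(SIGPROF, &sa, &old_signal_handler) == 0);
}

static void RestoreSignalHandler() {
  if (signal_handler_installed) {
    sigaction(SIGPROF, &old_signal_handler, NULL);
    signal_handler_installed = false;
  }
}

static void SendProfilingSignal(pid_t tgid, pid_t tid) {
  if (!signal_handler_installed) return;
  // Glibc has no tgkill() wrapper, so the raw syscall is used
  // (__NR_tgkill on Android, SYS_tgkill elsewhere), as in the patch above.
  syscall(SYS_tgkill, tgid, tid, SIGPROF);
}

int main() {
  InstallSignalHandler();
  // Demonstrate by signalling the current thread of the current process.
  SendProfilingSignal(getpid(), static_cast<pid_t>(syscall(SYS_gettid)));
  RestoreSignalHandler();
  printf("delivered SIGPROF via tgkill\n");
  return 0;
}
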
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index a570868..a216f6e 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -819,8 +819,9 @@
void SampleContext(Sampler* sampler) {
thread_act_t profiled_thread = sampler->platform_data()->profiled_thread();
- TickSample* sample = CpuProfiler::StartTickSampleEvent(sampler->isolate());
- if (sample == NULL) return;
+ TickSample sample_obj;
+ TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
+ if (sample == NULL) sample = &sample_obj;
if (KERN_SUCCESS != thread_suspend(profiled_thread)) return;
@@ -857,7 +858,6 @@
sampler->SampleStack(sample);
sampler->Tick(sample);
}
- CpuProfiler::FinishTickSampleEvent(sampler->isolate());
thread_resume(profiled_thread);
}
@@ -910,11 +910,6 @@
}
-void Sampler::DoSample() {
- // TODO(rogulenko): implement
-}
-
-
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
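
The pattern just above recurs in the OpenBSD, Solaris and Win32 hunks below: instead of bailing out when the CPU profiler has no tick-sample slot, the sampler falls back to a stack-allocated TickSample so the runtime profiler still gets its tick. A toy sketch of that fallback, using hypothetical stand-ins for the V8 types:

#include <cstdio>

struct TickSample {
  const void* pc;
  int frames_count;
};

// Stand-in for CpuProfiler::TickSampleEvent(): hands out a slot in the
// profiler's queue while CPU profiling is active, and NULL otherwise.
static TickSample* TickSampleEvent(bool profiling, TickSample* queue_slot) {
  return profiling ? queue_slot : NULL;
}

static void SampleContext(bool profiling, TickSample* queue_slot) {
  TickSample sample_obj;
  TickSample* sample = TickSampleEvent(profiling, queue_slot);
  if (sample == NULL) sample = &sample_obj;  // Fall back to the stack copy.
  // ... in V8: suspend the thread, read its registers, walk the stack, then
  // call sampler->SampleStack(sample) and sampler->Tick(sample) ...
  sample->pc = NULL;
  sample->frames_count = 0;
  printf("sampled into the %s buffer\n",
         sample == queue_slot ? "profiler queue" : "local");
}

int main() {
  TickSample queue_slot;
  SampleContext(true, &queue_slot);   // CPU profiler active: use its queue.
  SampleContext(false, &queue_slot);  // Runtime profiler only: local sample.
  return 0;
}
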
diff --git a/src/platform-nullos.cc b/src/platform-nullos.cc
index 679ef8e..7aaa7b2 100644
--- a/src/platform-nullos.cc
+++ b/src/platform-nullos.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -215,6 +215,11 @@
}
+CpuImplementer OS::GetCpuImplementer() {
+ UNIMPLEMENTED();
+}
+
+
bool OS::ArmCpuHasFeature(CpuFeature feature) {
UNIMPLEMENTED();
}
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index f96d9e3..408d4dc 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -731,8 +731,9 @@
Sampler* sampler = isolate->logger()->sampler();
if (sampler == NULL || !sampler->IsActive()) return;
- TickSample* sample = CpuProfiler::StartTickSampleEvent(isolate);
- if (sample == NULL) return;
+ TickSample sample_obj;
+ TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
+ if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
sample->state = isolate->current_vm_state();
@@ -761,7 +762,6 @@
#endif // __NetBSD__
sampler->SampleStack(sample);
sampler->Tick(sample);
- CpuProfiler::FinishTickSampleEvent(isolate);
}
@@ -964,11 +964,6 @@
}
-void Sampler::DoSample() {
- // TODO(rogulenko): implement
-}
-
-
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index 36d89e6..4248ea2 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -669,8 +669,9 @@
Sampler* sampler = isolate->logger()->sampler();
if (sampler == NULL || !sampler->IsActive()) return;
- TickSample* sample = CpuProfiler::StartTickSampleEvent(isolate);
- if (sample == NULL) return;
+ TickSample sample_obj;
+ TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
+ if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
@@ -683,7 +684,6 @@
sampler->SampleStack(sample);
sampler->Tick(sample);
- CpuProfiler::FinishTickSampleEvent(isolate);
}
class Sampler::PlatformData : public Malloced {
@@ -887,11 +887,6 @@
}
-void Sampler::DoSample() {
- // TODO(rogulenko): implement
-}
-
-
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 65d31a9..49463be 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -2038,8 +2038,9 @@
CONTEXT context;
memset(&context, 0, sizeof(context));
- TickSample* sample = CpuProfiler::StartTickSampleEvent(sampler->isolate());
- if (sample == NULL) return;
+ TickSample sample_obj;
+ TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
+ if (sample == NULL) sample = &sample_obj;
static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
if (SuspendThread(profiled_thread) == kSuspendFailed) return;
@@ -2059,7 +2060,6 @@
sampler->SampleStack(sample);
sampler->Tick(sample);
}
- CpuProfiler::FinishTickSampleEvent(sampler->isolate());
ResumeThread(profiled_thread);
}
@@ -2114,11 +2114,6 @@
}
-void Sampler::DoSample() {
- // TODO(rogulenko): implement
-}
-
-
void Sampler::Start() {
ASSERT(!IsActive());
SetActive(true);
diff --git a/src/platform.h b/src/platform.h
index 4f8fdc3..de896ac 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -308,6 +308,9 @@
// Returns the double constant NAN
static double nan_value();
+ // Support runtime detection of Cpu implementer
+ static CpuImplementer GetCpuImplementer();
+
// Support runtime detection of VFP3 on ARM CPUs.
static bool ArmCpuHasFeature(CpuFeature feature);
@@ -741,9 +744,6 @@
IncSamplesTaken();
}
- // Performs platform-specific stack sampling.
- void DoSample();
-
// This method is called for each sampling period with the current
// program counter.
virtual void Tick(TickSample* sample) = 0;
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index d9259c8..b853f33 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -2009,11 +2009,6 @@
Map::kConstructorOffset);
if (map->HasTransitionArray()) {
TransitionArray* transitions = map->transitions();
- DescriptorArray* descriptors = transitions->descriptors();
- TagObject(descriptors, "(map descriptors)");
- SetInternalReference(transitions, entry,
- "descriptors", descriptors,
- TransitionArray::kDescriptorsOffset);
Object* back_pointer = transitions->back_pointer_storage();
TagObject(transitions->back_pointer_storage(), "(back pointer)");
@@ -2034,6 +2029,12 @@
"backpointer", back_pointer,
Map::kTransitionsOrBackPointerOffset);
}
+ DescriptorArray* descriptors = map->instance_descriptors();
+ TagObject(descriptors, "(map descriptors)");
+ SetInternalReference(map, entry,
+ "descriptors", descriptors,
+ Map::kDescriptorsOffset);
+
SetInternalReference(map, entry,
"code_cache", map->code_cache(),
Map::kCodeCacheOffset);
diff --git a/src/regexp-macro-assembler.cc b/src/regexp-macro-assembler.cc
index a4719b5..82ba34d 100644
--- a/src/regexp-macro-assembler.cc
+++ b/src/regexp-macro-assembler.cc
@@ -67,11 +67,7 @@
bool NativeRegExpMacroAssembler::CanReadUnaligned() {
-#ifdef V8_TARGET_CAN_READ_UNALIGNED
- return !slow_safe();
-#else
- return false;
-#endif
+ return FLAG_enable_unaligned_accesses && !slow_safe();
}
const byte* NativeRegExpMacroAssembler::StringCharacterPosition(
diff --git a/src/runtime.cc b/src/runtime.cc
index 356b6fc..19d9a3f 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -2254,6 +2254,7 @@
// Set the code of the target function.
target->ReplaceCode(source_shared->code());
+ ASSERT(target->next_function_link()->IsUndefined());
// Make sure we get a fresh copy of the literal vector to avoid cross
// context contamination.
@@ -2267,7 +2268,6 @@
}
target->set_context(*context);
target->set_literals(*literals);
- target->set_next_function_link(isolate->heap()->undefined_value());
if (isolate->logger()->is_logging_code_events() ||
CpuProfiler::is_profiling(isolate)) {
diff --git a/src/spaces.h b/src/spaces.h
index 7ed0977..95c63d6 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -100,9 +100,6 @@
#define ASSERT_OBJECT_ALIGNED(address) \
ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
-#define ASSERT_MAP_ALIGNED(address) \
- ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)
-
#define ASSERT_OBJECT_SIZE(size) \
ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))
@@ -510,7 +507,7 @@
static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize;
static const int kBodyOffset =
- CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize));
+ CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
// The start offset of the object area in a page. Aligned to both maps and
// code alignment to be suitable for both. Also aligned to 32 words because
diff --git a/src/transitions-inl.h b/src/transitions-inl.h
index 95a400e..cfaa99d 100644
--- a/src/transitions-inl.h
+++ b/src/transitions-inl.h
@@ -84,23 +84,6 @@
}
-Object** TransitionArray::GetDescriptorsSlot() {
- return HeapObject::RawField(reinterpret_cast<HeapObject*>(this),
- kDescriptorsOffset);
-}
-
-
-DescriptorArray* TransitionArray::descriptors() {
- return DescriptorArray::cast(get(kDescriptorsIndex));
-}
-
-
-void TransitionArray::set_descriptors(DescriptorArray* descriptors) {
- ASSERT(descriptors->IsDescriptorArray());
- set(kDescriptorsIndex, descriptors);
-}
-
-
Object* TransitionArray::back_pointer_storage() {
return get(kBackPointerStorageIndex);
}
@@ -208,6 +191,11 @@
int TransitionArray::Search(String* name) {
+ if (IsSimpleTransition()) {
+ String* key = GetKey(kSimpleTransitionIndex);
+ if (key->Equals(name)) return kSimpleTransitionIndex;
+ return kNotFound;
+ }
return internal::Search<ALL_ENTRIES>(this, name);
}
diff --git a/src/transitions.cc b/src/transitions.cc
index a65f3c3..56b6caf 100644
--- a/src/transitions.cc
+++ b/src/transitions.cc
@@ -73,7 +73,6 @@
MaybeObject* TransitionArray::NewWith(SimpleTransitionFlag flag,
String* key,
Map* target,
- DescriptorArray* descriptors,
Object* back_pointer) {
TransitionArray* result;
MaybeObject* maybe_result;
@@ -88,16 +87,10 @@
result->NoIncrementalWriteBarrierSet(0, key, target);
}
result->set_back_pointer_storage(back_pointer);
- result->set_descriptors(descriptors);
return result;
}
-MaybeObject* TransitionArray::AllocateDescriptorsHolder() {
- return AllocateRaw(kDescriptorsHolderSize);
-}
-
-
MaybeObject* TransitionArray::ExtendToFullTransitionArray() {
ASSERT(!IsFullTransitionArray());
int nof = number_of_transitions();
@@ -109,7 +102,6 @@
result->NoIncrementalWriteBarrierCopyFrom(this, kSimpleTransitionIndex, 0);
}
- result->set_descriptors(descriptors());
result->set_back_pointer_storage(back_pointer_storage());
return result;
}
@@ -128,8 +120,6 @@
maybe_array = TransitionArray::Allocate(new_size);
if (!maybe_array->To(&result)) return maybe_array;
- result->set_descriptors(descriptors());
-
if (HasElementsTransition()) {
result->set_elements_transition(elements_transition());
}
diff --git a/src/transitions.h b/src/transitions.h
index 9643a61..0a66026 100644
--- a/src/transitions.h
+++ b/src/transitions.h
@@ -39,13 +39,22 @@
// TransitionArrays are fixed arrays used to hold map transitions for property,
-// constant, and element changes.
-// The format of the these objects is:
-// [0] Descriptor array
-// [1] Undefined or back pointer map
-// [2] Smi(0) or elements transition map
-// [3] Smi(0) or fixed array of prototype transitions
-// [4] First transition
+// constant, and element changes. They can either be simple transition arrays
+// that store a single property transition, or full transition arrays that have
+// space for elements transitions, prototype transitions and multiple property
+// transitions. The details related to property transitions are accessed in the
+// descriptor array of the target map. In the case of a simple transition, the
+// key is also read from the descriptor array of the target map.
+//
+// The simple format of these objects is:
+// [0] Undefined or back pointer map
+// [1] Single transition
+//
+// The full format is:
+// [0] Undefined or back pointer map
+// [1] Smi(0) or elements transition map
+// [2] Smi(0) or fixed array of prototype transitions
+// [3] First transition
// [length() - kTransitionSize] Last transition
class TransitionArray: public FixedArray {
public:
@@ -71,10 +80,6 @@
inline bool HasElementsTransition();
inline void ClearElementsTransition();
- inline Object** GetDescriptorsSlot();
- inline DescriptorArray* descriptors();
- inline void set_descriptors(DescriptorArray* descriptors);
-
inline Object* back_pointer_storage();
inline void set_back_pointer_storage(
Object* back_pointer,
@@ -102,11 +107,8 @@
SimpleTransitionFlag flag,
String* key,
Map* target,
- DescriptorArray* descriptors,
Object* back_pointer);
- static MUST_USE_RESULT MaybeObject* AllocateDescriptorsHolder();
-
MUST_USE_RESULT MaybeObject* ExtendToFullTransitionArray();
// Copy the transition array, inserting a new transition.
@@ -125,7 +127,6 @@
// Allocates a TransitionArray.
MUST_USE_RESULT static MaybeObject* Allocate(int number_of_transitions);
- bool IsDescriptorsHolder() { return length() == kDescriptorsHolderSize; }
bool IsSimpleTransition() { return length() == kSimpleTransitionSize; }
bool IsFullTransitionArray() { return length() >= kFirstIndex; }
@@ -135,24 +136,20 @@
// Constant for denoting key was not found.
static const int kNotFound = -1;
- static const int kDescriptorsIndex = 0;
- static const int kBackPointerStorageIndex = 1;
- static const int kDescriptorsHolderSize = 2;
+ static const int kBackPointerStorageIndex = 0;
// Layout for full transition arrays.
- static const int kElementsTransitionIndex = 2;
- static const int kPrototypeTransitionsIndex = 3;
- static const int kFirstIndex = 4;
+ static const int kElementsTransitionIndex = 1;
+ static const int kPrototypeTransitionsIndex = 2;
+ static const int kFirstIndex = 3;
// Layout for simple transition arrays.
- static const int kSimpleTransitionTarget = 2;
- static const int kSimpleTransitionSize = 3;
+ static const int kSimpleTransitionTarget = 1;
+ static const int kSimpleTransitionSize = 2;
static const int kSimpleTransitionIndex = 0;
STATIC_ASSERT(kSimpleTransitionIndex != kNotFound);
- static const int kDescriptorsOffset = FixedArray::kHeaderSize;
- static const int kBackPointerStorageOffset = kDescriptorsOffset +
- kPointerSize;
+ static const int kBackPointerStorageOffset = FixedArray::kHeaderSize;
// Layout for the full transition array header.
static const int kElementsTransitionOffset = kBackPointerStorageOffset +
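
A toy sketch of the two layouts described in the comment above, with hypothetical stand-in types; it only illustrates how the simple and full forms are distinguished by array length and how Search() short-circuits for the simple case (compare the transitions-inl.h hunk earlier in this patch):

#include <cstdio>
#include <cstring>

// Hypothetical mirror of the new layout constants after the descriptor slot
// was dropped (see src/transitions.h above).
static const int kBackPointerStorageIndex = 0;
static const int kElementsTransitionIndex = 1;    // Full arrays only.
static const int kPrototypeTransitionsIndex = 2;  // Full arrays only.
static const int kFirstIndex = 3;                 // First transition (full).
static const int kSimpleTransitionIndex = 0;
static const int kSimpleTransitionSize = 2;
static const int kNotFound = -1;

// Toy model: a list of transition keys plus the FixedArray length that the
// real code uses to tell the two layouts apart.
struct ToyTransitionArray {
  const char* keys[4];
  int num_keys;
  int length;

  bool IsSimpleTransition() const { return length == kSimpleTransitionSize; }

  int Search(const char* name) const {
    if (IsSimpleTransition()) {
      // A simple array stores exactly one transition; compare its key.
      return strcmp(keys[kSimpleTransitionIndex], name) == 0
                 ? kSimpleTransitionIndex
                 : kNotFound;
    }
    // A full array holds several transitions (stored from kFirstIndex onwards
    // in the real layout); this toy model just scans its key list.
    for (int i = 0; i < num_keys; i++) {
      if (strcmp(keys[i], name) == 0) return i;
    }
    return kNotFound;
  }
};

int main() {
  ToyTransitionArray simple = { { "x" }, 1, kSimpleTransitionSize };
  ToyTransitionArray full = { { "x", "y" }, 2, kFirstIndex + 2 };
  printf("simple: %d, full: %d\n", simple.Search("x"), full.Search("y"));
  return 0;
}
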
diff --git a/src/v8globals.h b/src/v8globals.h
index 1d22a11..95390ad 100644
--- a/src/v8globals.h
+++ b/src/v8globals.h
@@ -52,15 +52,6 @@
const intptr_t kDoubleAlignment = 8;
const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
-// Desired alignment for maps.
-#if V8_HOST_ARCH_64_BIT
-const intptr_t kMapAlignmentBits = kObjectAlignmentBits;
-#else
-const intptr_t kMapAlignmentBits = kObjectAlignmentBits + 3;
-#endif
-const intptr_t kMapAlignment = (1 << kMapAlignmentBits);
-const intptr_t kMapAlignmentMask = kMapAlignment - 1;
-
// Desired alignment for generated code is 32 bytes (to improve cache line
// utilization).
const int kCodeAlignmentBits = 5;
@@ -396,10 +387,6 @@
#define POINTER_SIZE_ALIGN(value) \
(((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)
-// MAP_POINTER_ALIGN returns the value aligned as a map pointer.
-#define MAP_POINTER_ALIGN(value) \
- (((value) + kMapAlignmentMask) & ~kMapAlignmentMask)
-
// CODE_POINTER_ALIGN returns the value aligned as a generated code segment.
#define CODE_POINTER_ALIGN(value) \
(((value) + kCodeAlignmentMask) & ~kCodeAlignmentMask)
@@ -425,6 +412,13 @@
#endif
+enum CpuImplementer {
+ UNKNOWN_IMPLEMENTER,
+ ARM_IMPLEMENTER,
+ QUALCOMM_IMPLEMENTER
+};
+
+
// Feature flags bit positions. They are mostly based on the CPUID spec.
// (We assign CPUID itself to one of the currently reserved bits --
// feel free to change this if needed.)
@@ -439,6 +433,8 @@
ARMv7 = 2, // ARM
VFP2 = 3, // ARM
SUDIV = 4, // ARM
+ UNALIGNED_ACCESSES = 5, // ARM
+ MOVW_MOVT_IMMEDIATE_LOADS = 6, // ARM
SAHF = 0, // x86
FPU = 1}; // MIPS
diff --git a/src/version.cc b/src/version.cc
index 083abc5..213259f 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 14
-#define BUILD_NUMBER 4
-#define PATCH_LEVEL 1
+#define BUILD_NUMBER 5
+#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index f3940e8..d022340 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -195,6 +195,12 @@
CPU::FlushICache(pc, sizeof(int32_t));
}
+
+Address Assembler::target_address_from_return_address(Address pc) {
+ return pc - kCallTargetAddressOffset;
+}
+
+
Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
return code_targets_[Memory::int32_at(pc)];
}
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index e00b403..e8b0be9 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -581,6 +581,10 @@
static inline Address target_address_at(Address pc);
static inline void set_target_address_at(Address pc, Address target);
+ // Return the code target address at a call site from the return address
+ // of that call in the instruction stream.
+ static inline Address target_address_from_return_address(Address pc);
+
// This sets the branch destination (which is in the instruction on x64).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
@@ -620,6 +624,7 @@
static const int kCallInstructionLength = 13;
static const int kJSReturnSequenceLength = 13;
static const int kShortCallInstructionLength = 5;
+ static const int kPatchDebugBreakSlotReturnOffset = 4;
// The debug break slot must be able to contain a call instruction.
static const int kDebugBreakSlotLength = kCallInstructionLength;
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index 0502502..a3fe8f9 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -104,19 +104,7 @@
// ignore all slots that might have been recorded on it.
isolate->heap()->mark_compact_collector()->InvalidateCode(code);
- // Iterate over all the functions which share the same code object
- // and make them use unoptimized version.
- Context* context = function->context()->native_context();
- Object* element = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
- SharedFunctionInfo* shared = function->shared();
- while (!element->IsUndefined()) {
- JSFunction* func = JSFunction::cast(element);
- // Grab element before code replacement as ReplaceCode alters the list.
- element = func->next_function_link();
- if (func->code() == code) {
- func->ReplaceCode(shared->code());
- }
- }
+ ReplaceCodeForRelatedFunctions(function, code);
if (FLAG_trace_deopt) {
PrintF("[forced deoptimization: ");
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index c08ca7b..b461e62 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -4490,8 +4490,7 @@
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
- Register temp_reg = ToRegister(instr->temp());
- __ ClampDoubleToUint8(value_reg, xmm0, result_reg, temp_reg);
+ __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
}
@@ -4505,8 +4504,7 @@
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
ASSERT(instr->unclamped()->Equals(instr->result()));
Register input_reg = ToRegister(instr->unclamped());
- Register temp_reg = ToRegister(instr->temp());
- XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp2());
+ XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
Label is_smi, done, heap_number;
__ JumpIfSmi(input_reg, &is_smi);
@@ -4526,7 +4524,7 @@
// Heap number
__ bind(&heap_number);
__ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(xmm0, temp_xmm_reg, input_reg, temp_reg);
+ __ ClampDoubleToUint8(xmm0, temp_xmm_reg, input_reg);
__ jmp(&done, Label::kNear);
// smi
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index add5676..43fb8b9 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -1697,8 +1697,7 @@
Representation input_rep = value->representation();
LOperand* reg = UseRegister(value);
if (input_rep.IsDouble()) {
- return DefineAsRegister(new(zone()) LClampDToUint8(reg,
- TempRegister()));
+ return DefineAsRegister(new(zone()) LClampDToUint8(reg));
} else if (input_rep.IsInteger32()) {
return DefineSameAsFirst(new(zone()) LClampIToUint8(reg));
} else {
@@ -1706,7 +1705,6 @@
// Register allocator doesn't (yet) support allocation of double
// temps. Reserve xmm1 explicitly.
LClampTToUint8* result = new(zone()) LClampTToUint8(reg,
- TempRegister(),
FixedTemp(xmm1));
return AssignEnvironment(DefineSameAsFirst(result));
}
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index 1eba2da..6cf4af6 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -2138,15 +2138,13 @@
};
-class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
+class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
public:
- LClampDToUint8(LOperand* unclamped, LOperand* temp) {
+ explicit LClampDToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
- temps_[0] = temp;
}
LOperand* unclamped() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
};
@@ -2164,19 +2162,16 @@
};
-class LClampTToUint8: public LTemplateInstruction<1, 1, 2> {
+class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
public:
LClampTToUint8(LOperand* unclamped,
- LOperand* temp,
- LOperand* temp2) {
+ LOperand* temp_xmm) {
inputs_[0] = unclamped;
- temps_[0] = temp;
- temps_[1] = temp2;
+ temps_[0] = temp_xmm;
}
LOperand* unclamped() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
+ LOperand* temp_xmm() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
};
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 49849d8..7750674 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -2868,16 +2868,24 @@
void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
XMMRegister temp_xmm_reg,
- Register result_reg,
- Register temp_reg) {
+ Register result_reg) {
Label done;
- Set(result_reg, 0);
+ Label conv_failure;
xorps(temp_xmm_reg, temp_xmm_reg);
- ucomisd(input_reg, temp_xmm_reg);
- j(below, &done, Label::kNear);
cvtsd2si(result_reg, input_reg);
testl(result_reg, Immediate(0xFFFFFF00));
j(zero, &done, Label::kNear);
+ cmpl(result_reg, Immediate(0x80000000));
+ j(equal, &conv_failure, Label::kNear);
+ movl(result_reg, Immediate(0));
+ setcc(above, result_reg);
+ subl(result_reg, Immediate(1));
+ andl(result_reg, Immediate(255));
+ jmp(&done, Label::kNear);
+ bind(&conv_failure);
+ Set(result_reg, 0);
+ ucomisd(input_reg, temp_xmm_reg);
+ j(below, &done, Label::kNear);
Set(result_reg, 255);
bind(&done);
}
@@ -2905,28 +2913,7 @@
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
- Register temp = descriptors;
- movq(temp, FieldOperand(map, Map::kTransitionsOrBackPointerOffset));
-
- Label ok, fail, load_from_back_pointer;
- CheckMap(temp,
- isolate()->factory()->fixed_array_map(),
- &fail,
- DONT_DO_SMI_CHECK);
- movq(descriptors, FieldOperand(temp, TransitionArray::kDescriptorsOffset));
- jmp(&ok);
-
- bind(&fail);
- CompareRoot(temp, Heap::kUndefinedValueRootIndex);
- j(not_equal, &load_from_back_pointer, Label::kNear);
- Move(descriptors, isolate()->factory()->empty_descriptor_array());
- jmp(&ok);
-
- bind(&load_from_back_pointer);
- movq(temp, FieldOperand(temp, Map::kTransitionsOrBackPointerOffset));
- movq(descriptors, FieldOperand(temp, TransitionArray::kDescriptorsOffset));
-
- bind(&ok);
+ movq(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
}
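
For context, a portable sketch of the clamping behaviour ClampDoubleToUint8 implements above, now without the extra general-purpose temp register. The helper name is hypothetical and the rounding is simplified (half-up) relative to the cvtsd2si-based sequence in the macro:

#include <cstdint>
#include <cstdio>

// Assumed semantics: NaN and negative values clamp to 0, values above 255
// clamp to 255, everything in between is rounded to an integer.
static uint8_t ClampedUint8(double value) {
  if (!(value > 0.0)) return 0;   // NaN compares false, so it lands here too.
  if (value >= 255.0) return 255;
  return static_cast<uint8_t>(value + 0.5);  // Round half up for brevity.
}

int main() {
  printf("%d %d %d %d\n",
         ClampedUint8(-3.5),        // 0
         ClampedUint8(64.4),        // 64
         ClampedUint8(1e9),         // 255
         ClampedUint8(0.0 / 0.0));  // NaN -> 0
  return 0;
}
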
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 8c1c101..cc057ac 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -942,8 +942,7 @@
void ClampDoubleToUint8(XMMRegister input_reg,
XMMRegister temp_xmm_reg,
- Register result_reg,
- Register temp_reg);
+ Register result_reg);
void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch);
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index c55fc2a..ab59e33 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -84,3 +84,6 @@
# platform-tls.h does not contain an ANDROID-related header.
test-platform-tls/FastTLS: SKIP
+
+# This test times out.
+test-threads/ThreadJoinSelf: SKIP
diff --git a/test/cctest/test-alloc.cc b/test/cctest/test-alloc.cc
index 24c7f1f..7ba2583 100644
--- a/test/cctest/test-alloc.cc
+++ b/test/cctest/test-alloc.cc
@@ -158,7 +158,7 @@
Handle<DescriptorArray> new_descriptors = FACTORY->NewDescriptorArray(0, 1);
v8::internal::DescriptorArray::WhitenessWitness witness(*new_descriptors);
- v8::internal::Map::SetDescriptors(map, new_descriptors);
+ map->set_instance_descriptors(*new_descriptors);
CallbacksDescriptor d(*name,
*foreign,
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index a8f340d..3be0680 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -969,22 +969,33 @@
THREADED_TEST(TinyInteger) {
v8::HandleScope scope;
LocalContext env;
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+
int32_t value = 239;
Local<v8::Integer> value_obj = v8::Integer::New(value);
CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
+
+ value_obj = v8::Integer::New(value, isolate);
+ CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
}
THREADED_TEST(BigSmiInteger) {
v8::HandleScope scope;
LocalContext env;
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+
int32_t value = i::Smi::kMaxValue;
// We cannot add one to a Smi::kMaxValue without wrapping.
if (i::kSmiValueSize < 32) {
CHECK(i::Smi::IsValid(value));
CHECK(!i::Smi::IsValid(value + 1));
+
Local<v8::Integer> value_obj = v8::Integer::New(value);
CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
+
+ value_obj = v8::Integer::New(value, isolate);
+ CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
}
}
@@ -992,6 +1003,8 @@
THREADED_TEST(BigInteger) {
v8::HandleScope scope;
LocalContext env;
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+
// We cannot add one to a Smi::kMaxValue without wrapping.
if (i::kSmiValueSize < 32) {
// The casts allow this to compile, even if Smi::kMaxValue is 2^31-1.
@@ -1000,8 +1013,12 @@
static_cast<int32_t>(static_cast<uint32_t>(i::Smi::kMaxValue) + 1);
CHECK(value > i::Smi::kMaxValue);
CHECK(!i::Smi::IsValid(value));
+
Local<v8::Integer> value_obj = v8::Integer::New(value);
CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
+
+ value_obj = v8::Integer::New(value, isolate);
+ CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
}
}
@@ -1009,42 +1026,66 @@
THREADED_TEST(TinyUnsignedInteger) {
v8::HandleScope scope;
LocalContext env;
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+
uint32_t value = 239;
+
Local<v8::Integer> value_obj = v8::Integer::NewFromUnsigned(value);
CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
+
+ value_obj = v8::Integer::NewFromUnsigned(value, isolate);
+ CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
}
THREADED_TEST(BigUnsignedSmiInteger) {
v8::HandleScope scope;
LocalContext env;
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+
uint32_t value = static_cast<uint32_t>(i::Smi::kMaxValue);
CHECK(i::Smi::IsValid(value));
CHECK(!i::Smi::IsValid(value + 1));
+
Local<v8::Integer> value_obj = v8::Integer::NewFromUnsigned(value);
CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
+
+ value_obj = v8::Integer::NewFromUnsigned(value, isolate);
+ CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
}
THREADED_TEST(BigUnsignedInteger) {
v8::HandleScope scope;
LocalContext env;
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+
uint32_t value = static_cast<uint32_t>(i::Smi::kMaxValue) + 1;
CHECK(value > static_cast<uint32_t>(i::Smi::kMaxValue));
CHECK(!i::Smi::IsValid(value));
+
Local<v8::Integer> value_obj = v8::Integer::NewFromUnsigned(value);
CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
+
+ value_obj = v8::Integer::NewFromUnsigned(value, isolate);
+ CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
}
THREADED_TEST(OutOfSignedRangeUnsignedInteger) {
v8::HandleScope scope;
LocalContext env;
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+
uint32_t INT32_MAX_AS_UINT = (1U << 31) - 1;
uint32_t value = INT32_MAX_AS_UINT + 1;
CHECK(value > INT32_MAX_AS_UINT); // No overflow.
+
Local<v8::Integer> value_obj = v8::Integer::NewFromUnsigned(value);
CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
+
+ value_obj = v8::Integer::NewFromUnsigned(value, isolate);
+ CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
}
@@ -2414,20 +2455,19 @@
bool message_received;
-static void check_message(v8::Handle<v8::Message> message,
- v8::Handle<Value> data) {
- CHECK_EQ(5.76, data->NumberValue());
+static void check_message_0(v8::Handle<v8::Message> message,
+ v8::Handle<Value> data) {
CHECK_EQ(6.75, message->GetScriptResourceName()->NumberValue());
CHECK_EQ(7.56, message->GetScriptData()->NumberValue());
message_received = true;
}
-THREADED_TEST(MessageHandlerData) {
+THREADED_TEST(MessageHandler0) {
message_received = false;
v8::HandleScope scope;
CHECK(!message_received);
- v8::V8::AddMessageListener(check_message, v8_num(5.76));
+ v8::V8::AddMessageListener(check_message_0);
LocalContext context;
v8::ScriptOrigin origin =
v8::ScriptOrigin(v8_str("6.75"));
@@ -2437,7 +2477,56 @@
script->Run();
CHECK(message_received);
// clear out the message listener
- v8::V8::RemoveMessageListeners(check_message);
+ v8::V8::RemoveMessageListeners(check_message_0);
+}
+
+
+static void check_message_1(v8::Handle<v8::Message> message,
+ v8::Handle<Value> data) {
+ CHECK(data->IsNumber());
+ CHECK_EQ(1337, data->Int32Value());
+ message_received = true;
+}
+
+
+TEST(MessageHandler1) {
+ message_received = false;
+ v8::HandleScope scope;
+ CHECK(!message_received);
+ v8::V8::AddMessageListener(check_message_1);
+ LocalContext context;
+ CompileRun("throw 1337;");
+ CHECK(message_received);
+ // clear out the message listener
+ v8::V8::RemoveMessageListeners(check_message_1);
+}
+
+
+static void check_message_2(v8::Handle<v8::Message> message,
+ v8::Handle<Value> data) {
+ LocalContext context;
+ CHECK(data->IsObject());
+ v8::Local<v8::Value> hidden_property =
+ v8::Object::Cast(*data)->GetHiddenValue(v8_str("hidden key"));
+ CHECK(v8_str("hidden value")->Equals(hidden_property));
+ message_received = true;
+}
+
+
+TEST(MessageHandler2) {
+ message_received = false;
+ v8::HandleScope scope;
+ CHECK(!message_received);
+ v8::V8::AddMessageListener(check_message_2);
+ LocalContext context;
+ v8::Local<v8::Value> error = v8::Exception::Error(v8_str("custom error"));
+ v8::Object::Cast(*error)->SetHiddenValue(v8_str("hidden key"),
+ v8_str("hidden value"));
+ context->Global()->Set(v8_str("error"), error);
+ CompileRun("throw error;");
+ CHECK(message_received);
+ // clear out the message listener
+ v8::V8::RemoveMessageListeners(check_message_2);
}
@@ -3078,7 +3167,7 @@
"Number.prototype.toString = function() { return 'Whoops'; };"
"ReferenceError.prototype.toString = Object.prototype.toString;");
CompileRun("asdf;");
- v8::V8::RemoveMessageListeners(check_message);
+ v8::V8::RemoveMessageListeners(check_reference_error_message);
}
@@ -3125,7 +3214,7 @@
LocalContext context(0, templ);
CompileRun("ThrowFromC();");
CHECK(message_received);
- v8::V8::RemoveMessageListeners(check_message);
+ v8::V8::RemoveMessageListeners(receive_message);
}
@@ -3143,7 +3232,7 @@
CHECK(try_catch.HasCaught());
CHECK(result.IsEmpty());
CHECK(message_received);
- v8::V8::RemoveMessageListeners(check_message);
+ v8::V8::RemoveMessageListeners(receive_message);
}
@@ -5108,7 +5197,6 @@
static void MissingScriptInfoMessageListener(v8::Handle<v8::Message> message,
v8::Handle<Value> data) {
- CHECK_EQ(v8::Undefined(), data);
CHECK(message->GetScriptResourceName()->IsUndefined());
CHECK_EQ(v8::Undefined(), message->GetScriptResourceName());
message->GetLineNumber();
@@ -5229,7 +5317,9 @@
bool object_a_disposed = false;
object_a.MakeWeak(&object_a_disposed, &DisposeAndSetFlag);
+ CHECK(!object_a.IsIndependent());
object_a.MarkIndependent();
+ CHECK(object_a.IsIndependent());
HEAP->PerformScavenge();
CHECK(object_a_disposed);
}
@@ -16177,6 +16267,45 @@
}
+class Visitor42 : public v8::PersistentHandleVisitor {
+ public:
+ explicit Visitor42(v8::Persistent<v8::Object> object)
+ : counter_(0), object_(object) { }
+
+ virtual void VisitPersistentHandle(Persistent<Value> value,
+ uint16_t class_id) {
+ if (class_id == 42) {
+ CHECK(value->IsObject());
+ v8::Persistent<v8::Object> visited =
+ v8::Persistent<v8::Object>::Cast(value);
+ CHECK_EQ(42, visited.WrapperClassId());
+ CHECK_EQ(object_, visited);
+ ++counter_;
+ }
+ }
+
+ int counter_;
+ v8::Persistent<v8::Object> object_;
+};
+
+
+TEST(PersistentHandleVisitor) {
+ v8::HandleScope scope;
+ LocalContext context;
+ v8::Persistent<v8::Object> object =
+ v8::Persistent<v8::Object>::New(v8::Object::New());
+ CHECK_EQ(0, object.WrapperClassId());
+ object.SetWrapperClassId(42);
+ CHECK_EQ(42, object.WrapperClassId());
+
+ Visitor42 visitor(object);
+ v8::V8::VisitHandlesWithClassIds(&visitor);
+ CHECK_EQ(1, visitor.counter_);
+
+ object.Dispose();
+}
+
+
TEST(RegExp) {
v8::HandleScope scope;
LocalContext context;
@@ -17567,6 +17696,16 @@
}
+THREADED_TEST(Regress157124) {
+ v8::HandleScope scope;
+ LocalContext context;
+ Local<ObjectTemplate> templ = ObjectTemplate::New();
+ Local<Object> obj = templ->NewInstance();
+ obj->GetIdentityHash();
+ obj->DeleteHiddenValue(v8_str("Bug"));
+}
+
+
#ifndef WIN32
class ThreadInterruptTest {
public:
diff --git a/test/cctest/test-cpu-profiler.cc b/test/cctest/test-cpu-profiler.cc
index 589e6d8..b10e688 100644
--- a/test/cctest/test-cpu-profiler.cc
+++ b/test/cctest/test-cpu-profiler.cc
@@ -5,7 +5,6 @@
#include "v8.h"
#include "cpu-profiler-inl.h"
#include "cctest.h"
-#include "platform.h"
#include "../include/v8-profiler.h"
using i::CodeEntry;
@@ -21,7 +20,7 @@
TEST(StartStop) {
CpuProfilesCollection profiles;
ProfileGenerator generator(&profiles);
- ProfilerEventsProcessor processor(&generator, NULL, 1000);
+ ProfilerEventsProcessor processor(&generator);
processor.Start();
processor.Stop();
processor.Join();
@@ -39,13 +38,11 @@
return reinterpret_cast<i::Address>(n);
}
-static void AddTickSampleEvent(ProfilerEventsProcessor* processor,
- i::Address frame1,
- i::Address frame2 = NULL,
- i::Address frame3 = NULL) {
- i::TickSample* sample;
- i::OS::Sleep(20);
- while ((sample = processor->StartTickSampleEvent()) == NULL) i::OS::Sleep(20);
+static void EnqueueTickSampleEvent(ProfilerEventsProcessor* proc,
+ i::Address frame1,
+ i::Address frame2 = NULL,
+ i::Address frame3 = NULL) {
+ i::TickSample* sample = proc->TickSampleEvent();
sample->pc = frame1;
sample->tos = frame1;
sample->frames_count = 0;
@@ -57,7 +54,6 @@
sample->stack[1] = frame3;
sample->frames_count = 2;
}
- processor->FinishTickSampleEvent();
}
namespace {
@@ -85,7 +81,7 @@
CpuProfilesCollection profiles;
profiles.StartProfiling("", 1);
ProfileGenerator generator(&profiles);
- ProfilerEventsProcessor processor(&generator, NULL, 1000);
+ ProfilerEventsProcessor processor(&generator);
processor.Start();
// Enqueue code creation events.
@@ -112,8 +108,8 @@
processor.CodeMoveEvent(ToAddress(0x1400), ToAddress(0x1500));
processor.CodeCreateEvent(i::Logger::STUB_TAG, 3, ToAddress(0x1600), 0x10);
processor.CodeCreateEvent(i::Logger::STUB_TAG, 4, ToAddress(0x1605), 0x10);
- // Add a tick event to enable code events processing.
- AddTickSampleEvent(&processor, ToAddress(0x1000));
+ // Enqueue a tick event to enable code events processing.
+ EnqueueTickSampleEvent(&processor, ToAddress(0x1000));
processor.Stop();
processor.Join();
@@ -146,7 +142,7 @@
CpuProfilesCollection profiles;
profiles.StartProfiling("", 1);
ProfileGenerator generator(&profiles);
- ProfilerEventsProcessor processor(&generator, NULL, 1000);
+ ProfilerEventsProcessor processor(&generator);
processor.Start();
processor.CodeCreateEvent(i::Logger::BUILTIN_TAG,
@@ -158,12 +154,12 @@
"ddd",
ToAddress(0x1400),
0x80);
- AddTickSampleEvent(&processor, ToAddress(0x1210));
- AddTickSampleEvent(&processor, ToAddress(0x1305), ToAddress(0x1220));
- AddTickSampleEvent(&processor,
- ToAddress(0x1404),
- ToAddress(0x1305),
- ToAddress(0x1230));
+ EnqueueTickSampleEvent(&processor, ToAddress(0x1210));
+ EnqueueTickSampleEvent(&processor, ToAddress(0x1305), ToAddress(0x1220));
+ EnqueueTickSampleEvent(&processor,
+ ToAddress(0x1404),
+ ToAddress(0x1305),
+ ToAddress(0x1230));
processor.Stop();
processor.Join();
@@ -236,7 +232,7 @@
CpuProfilesCollection profiles;
profiles.StartProfiling("", 1);
ProfileGenerator generator(&profiles);
- ProfilerEventsProcessor processor(&generator, NULL, 1000);
+ ProfilerEventsProcessor processor(&generator);
processor.Start();
processor.CodeCreateEvent(i::Logger::BUILTIN_TAG,
@@ -244,14 +240,13 @@
ToAddress(0x1200),
0x80);
- i::TickSample* sample = processor.StartTickSampleEvent();
+ i::TickSample* sample = processor.TickSampleEvent();
sample->pc = ToAddress(0x1200);
sample->tos = 0;
sample->frames_count = i::TickSample::kMaxFramesCount;
for (int i = 0; i < sample->frames_count; ++i) {
sample->stack[i] = ToAddress(0x1200);
}
- processor.FinishTickSampleEvent();
processor.Stop();
processor.Join();
diff --git a/test/cctest/test-heap-profiler.cc b/test/cctest/test-heap-profiler.cc
index 4f7421b..5235971 100644
--- a/test/cctest/test-heap-profiler.cc
+++ b/test/cctest/test-heap-profiler.cc
@@ -1676,18 +1676,8 @@
CHECK_NE(NULL, map);
const v8::HeapGraphNode* own_descriptors = GetProperty(
map, v8::HeapGraphEdge::kInternal, "descriptors");
- CHECK_EQ(NULL, own_descriptors);
+ CHECK_NE(NULL, own_descriptors);
const v8::HeapGraphNode* own_transitions = GetProperty(
map, v8::HeapGraphEdge::kInternal, "transitions");
CHECK_EQ(NULL, own_transitions);
-
- const v8::HeapGraphNode* back_pointer_map =
- GetProperty(map, v8::HeapGraphEdge::kInternal, "backpointer");
- CHECK_NE(NULL, back_pointer_map);
- const v8::HeapGraphNode* descriptors = GetProperty(
- back_pointer_map, v8::HeapGraphEdge::kInternal, "descriptors");
- CHECK_NE(NULL, descriptors);
- const v8::HeapGraphNode* transitions = GetProperty(
- back_pointer_map, v8::HeapGraphEdge::kInternal, "transitions");
- CHECK_NE(NULL, transitions);
}
diff --git a/test/cctest/testcfg.py b/test/cctest/testcfg.py
index b67002f..69a5db2 100644
--- a/test/cctest/testcfg.py
+++ b/test/cctest/testcfg.py
@@ -38,16 +38,20 @@
def __init__(self, name, root):
super(CcTestSuite, self).__init__(name, root)
- self.serdes_dir = normpath(join(root, "..", "..", "out", ".serdes"))
- if exists(self.serdes_dir):
+ self.serdes_dir = os.path.normpath(
+ os.path.join(root, "..", "..", "out", ".serdes"))
+ if os.path.exists(self.serdes_dir):
shutil.rmtree(self.serdes_dir, True)
os.makedirs(self.serdes_dir)
def ListTests(self, context):
- shell = join(context.shell_dir, self.shell())
if utils.IsWindows():
shell += '.exe'
- output = commands.Execute([shell, '--list'])
+ shell = os.path.abspath(os.path.join(context.shell_dir, self.shell()))
+ output = commands.Execute([context.command_prefix,
+ shell,
+ '--list',
+ context.extra_flags])
if output.exit_code != 0:
print output.stdout
print output.stderr
@@ -66,7 +70,7 @@
def GetFlagsForTestCase(self, testcase, context):
testname = testcase.path.split(os.path.sep)[-1]
- serialization_file = join(self.serdes_dir, "serdes_" + testname)
+ serialization_file = os.path.join(self.serdes_dir, "serdes_" + testname)
serialization_file += ''.join(testcase.flags).replace('-', '_')
return (testcase.flags + [testcase.path] + context.mode_flags +
["--testing_serialization_file=" + serialization_file])
diff --git a/test/mjsunit/delete-non-configurable.js b/test/mjsunit/delete-non-configurable.js
new file mode 100644
index 0000000..8991f43
--- /dev/null
+++ b/test/mjsunit/delete-non-configurable.js
@@ -0,0 +1,74 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Delete elements of a String object.
+var TIPLI = "tipli";
+var so = new String(TIPLI);
+var length = so.length;
+
+for (var i = 0; i < length; i++) {
+ assertFalse(delete so[i]);
+ assertThrows("'use strict'; delete so[i];", TypeError);
+ assertFalse(delete so[i.toString()]);
+ assertThrows("'use strict'; delete so[i.toString()];", TypeError);
+}
+
+assertEquals(length, so.length);
+assertEquals(new String(TIPLI), so);
+
+// Delete elements of an Array.
+var arr = new Array(length);
+
+for (var i = 0; i < length; i++) {
+ arr[i] = i;
+ Object.defineProperty(arr, i, { configurable: false });
+}
+
+for (var i = 0; i < length; i++) {
+ assertFalse(delete arr[i]);
+ assertThrows("'use strict'; delete arr[i];", TypeError);
+ assertFalse(delete arr[i.toString()]);
+ assertThrows("'use strict'; delete arr[i.toString()];", TypeError);
+ assertEquals(i, arr[i]);
+}
+
+assertEquals(length, arr.length);
+assertTrue(delete arr[length]);
+
+// Delete an element of an Object.
+var INDEX = 28;
+var obj = new Object();
+
+obj[INDEX] = TIPLI;
+Object.defineProperty(obj, INDEX, { configurable: false });
+
+assertFalse(delete obj[INDEX]);
+assertThrows("'use strict'; delete obj[INDEX];", TypeError);
+assertFalse(delete obj[INDEX.toString()]);
+assertThrows("'use strict'; delete obj[INDEX.toString()];", TypeError);
+assertEquals(TIPLI, obj[INDEX]);
+assertTrue(delete arr[INDEX+1]);
diff --git a/test/mjsunit/json.js b/test/mjsunit/json.js
index bead376..54fa185 100644
--- a/test/mjsunit/json.js
+++ b/test/mjsunit/json.js
@@ -428,5 +428,5 @@
assertEquals(Object.prototype, o.__proto__); // __proto__ isn't changed.
assertEquals(0, Object.keys(o).length); // __proto__ isn't added as enumerable.
-
-
+var json = '{"stuff before slash\\\\stuff after slash":"whatever"}';
+assertEquals(json, JSON.stringify(JSON.parse(json)));
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index 7fd22a5..037093b 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -73,6 +73,7 @@
try: PASS, SKIP if $mode == debug
debug-scripts-request: PASS, SKIP if $mode == debug
array-constructor: PASS, SKIP if $mode == debug
+regress/regress-1122: PASS, SKIP if ($mode == debug && $arch == android_arm)
# Flaky test that can hit compilation-time stack overflow in debug mode.
unicode-test: PASS, (PASS || FAIL) if $mode == debug
diff --git a/test/mjsunit/regress/regress-2373.js b/test/mjsunit/regress/regress-2373.js
new file mode 100644
index 0000000..16a87ec
--- /dev/null
+++ b/test/mjsunit/regress/regress-2373.js
@@ -0,0 +1,29 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var o = JSON.parse('{"a":2600753951}');
+assertEquals(2600753951, o.a);
diff --git a/test/mjsunit/regress/regress-2374.js b/test/mjsunit/regress/regress-2374.js
new file mode 100644
index 0000000..b12e5f2
--- /dev/null
+++ b/test/mjsunit/regress/regress-2374.js
@@ -0,0 +1,33 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var msg = '{"result":{"profile":{"head":{"functionName":"(root)","url":"","lineNumber":0,"totalTime":495.7243772462511,"selfTime":0,"numberOfCalls":0,"visible":true,"callUID":2771605942,"children":[{"functionName":"(program)","url":"","lineNumber":0,"totalTime":495.7243772462511,"selfTime":495.7243772462511,"numberOfCalls":0,"visible":true,"callUID":1902715303,"children":[]}]},"bottomUpHead":{"functionName":"(root)","url":"","lineNumber":0,"totalTime":495.7243772462511,"selfTime":0,"numberOfCalls":0,"visible":true,"callUID":2771605942,"children":[{"functionName":"(program)","url":"","lineNumber":0,"totalTime":495.7243772462511,"selfTime":495.7243772462511,"numberOfCalls":0,"visible":true,"callUID":1902715303,"children":[]}]}}},"id":41}';
+
+var obj = JSON.parse(msg);
+var obj2 = JSON.parse(msg);
+
+assertEquals(JSON.stringify(obj), JSON.stringify(obj2));
diff --git a/test/mjsunit/regress/regress-builtin-array-op.js b/test/mjsunit/regress/regress-builtin-array-op.js
new file mode 100644
index 0000000..1e37af3
--- /dev/null
+++ b/test/mjsunit/regress/regress-builtin-array-op.js
@@ -0,0 +1,38 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that we invoke the correct sort function in
+// array operations.
+
+var foo = "hest";
+Array.prototype.sort = function(fn) { foo = "fisk"; };
+Function.prototype.call = function() { foo = "caramel"; };
+var a = [2,3,1];
+a[100000] = 0;
+a.join();
+assertEquals("hest", foo);
+
diff --git a/test/preparser/preparser.status b/test/preparser/preparser.status
index 6f15fed..40c5caf 100644
--- a/test/preparser/preparser.status
+++ b/test/preparser/preparser.status
@@ -31,3 +31,8 @@
# escapes (we need to parse to distinguish octal escapes from valid
# back-references).
strict-octal-regexp: FAIL
+
+[ $arch == android_arm || $arch == android_ia32 ]
+# Remove this once the issue above is fixed. The Android test runner does
+# not handle the "FAIL" test expectation correctly.
+strict-octal-regexp: SKIP
diff --git a/tools/run-tests.py b/tools/run-tests.py
index cab93fb..a49f656 100755
--- a/tools/run-tests.py
+++ b/tools/run-tests.py
@@ -60,6 +60,13 @@
"--enable-slow-asserts", "--debug-code", "--verify-heap"],
"release" : ["--nobreak-on-abort", "--nodead-code-elimination"]}
+SUPPORTED_ARCHS = ["android_arm",
+ "android_ia32",
+ "arm",
+ "ia32",
+ "mipsel",
+ "x64"]
+
def BuildOptions():
result = optparse.OptionParser()
@@ -150,7 +157,7 @@
options.arch = ARCH_GUESS
options.arch = options.arch.split(",")
for arch in options.arch:
- if not arch in ['ia32', 'x64', 'arm', 'mipsel']:
+ if not arch in SUPPORTED_ARCHS:
print "Unknown architecture %s" % arch
return False
@@ -174,12 +181,6 @@
options.shell_dir = os.path.dirname(options.shell)
if options.stress_only:
VARIANT_FLAGS = [["--stress-opt", "--always-opt"]]
- # Simulators are slow, therefore allow a longer default timeout.
- if options.timeout == -1:
- if options.arch == "arm" or options.arch == "mipsel":
- options.timeout = 2 * TIMEOUT_DEFAULT;
- else:
- options.timeout = TIMEOUT_DEFAULT;
if options.valgrind:
run_valgrind = os.path.join("tools", "run-valgrind.py")
# This is OK for distributed running, so we don't need to set no_network.
@@ -264,10 +265,18 @@
# Populate context object.
mode_flags = MODE_FLAGS[mode]
+ timeout = options.timeout
+ if timeout == -1:
+ # Simulators are slow, therefore allow a longer default timeout.
+ if arch in ["android", "arm", "mipsel"]:
+ timeout = 2 * TIMEOUT_DEFAULT;
+ else:
+ timeout = TIMEOUT_DEFAULT;
+
options.timeout *= TIMEOUT_SCALEFACTOR[mode]
ctx = context.Context(arch, mode, shell_dir,
mode_flags, options.verbose,
- options.timeout, options.isolates,
+ timeout, options.isolates,
options.command_prefix,
options.extra_flags)
diff --git a/tools/test.py b/tools/test.py
index 8c62136..b3b62b3 100755
--- a/tools/test.py
+++ b/tools/test.py
@@ -1371,8 +1371,9 @@
else:
pos = value.find('@')
import urllib
- prefix = urllib.unquote(value[:pos]).split()
- suffix = urllib.unquote(value[pos+1:]).split()
+ import shlex
+ prefix = shlex.split(urllib.unquote(value[:pos]))
+ suffix = shlex.split(urllib.unquote(value[pos+1:]))
def ExpandCommand(args):
return prefix + args + suffix
return ExpandCommand
diff --git a/tools/testrunner/local/commands.py b/tools/testrunner/local/commands.py
index 716d7ba..01f170d 100644
--- a/tools/testrunner/local/commands.py
+++ b/tools/testrunner/local/commands.py
@@ -131,6 +131,7 @@
def Execute(args, verbose=False, timeout=None):
+ args = [ c for c in args if c != "" ]
(fd_out, outname) = tempfile.mkstemp()
(fd_err, errname) = tempfile.mkstemp()
try:
diff --git a/tools/testrunner/local/execution.py b/tools/testrunner/local/execution.py
index 25df043..6004367 100644
--- a/tools/testrunner/local/execution.py
+++ b/tools/testrunner/local/execution.py
@@ -90,7 +90,9 @@
self.indicator.Starting()
self._RunInternal(jobs)
self.indicator.Done()
- return not self.failed
+ if self.failed:
+ return 1
+ return 0
def _RunInternal(self, jobs):
pool = multiprocessing.Pool(processes=jobs)
@@ -147,6 +149,7 @@
except KeyboardInterrupt:
pool.terminate()
pool.join()
+ raise
except Exception, e:
print("Exception: %s" % e)
pool.terminate()
@@ -165,11 +168,10 @@
if utils.IsWindows():
shell += ".exe"
cmd = ([self.context.command_prefix] +
- [os.path.join(self.context.shell_dir, shell)] +
+ [os.path.abspath(os.path.join(self.context.shell_dir, shell))] +
d8testflag +
test.suite.GetFlagsForTestCase(test, self.context) +
[self.context.extra_flags])
- cmd = [ c for c in cmd if c != "" ]
return cmd
diff --git a/tools/testrunner/local/progress.py b/tools/testrunner/local/progress.py
index 6ae416a..9075a95 100644
--- a/tools/testrunner/local/progress.py
+++ b/tools/testrunner/local/progress.py
@@ -152,6 +152,7 @@
def Done(self):
self.PrintProgress('Done')
+ print "" # Line break.
def AboutToRun(self, test):
self.PrintProgress(test.GetLabel())