Version 3.22.14

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@17250 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 02dbe7a..81a9775 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,8 @@
+2013-10-17: Version 3.22.14
+
+        Performance and stability improvements on all platforms.
+
+
 2013-10-16: Version 3.22.13
 
         Do not look up ArrayBuffer on global object in typed array constructor.
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 6979442..d2fd70f 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -2336,6 +2336,10 @@
     case Token::EQ_STRICT:
       cond = eq;
       break;
+    case Token::NE:
+    case Token::NE_STRICT:
+      cond = ne;
+      break;
     case Token::LT:
       cond = is_unsigned ? lo : lt;
       break;
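
The two new cases extend the backend's token-to-branch-condition mapping to the inequality tokens; the same change is mirrored in the ia32, mips, and x64 backends further down. A minimal sketch of the pattern, using hypothetical Token and Condition enums in place of V8's:

  #include <cassert>

  enum class Token { EQ, EQ_STRICT, NE, NE_STRICT, LT };
  enum class Condition { kEqual, kNotEqual, kLess };

  // Maps a comparison token to the branch condition to test (signed case).
  Condition TokenToCondition(Token op) {
    switch (op) {
      case Token::EQ:
      case Token::EQ_STRICT:
        return Condition::kEqual;
      case Token::NE:          // Newly handled: inequality tokens map to
      case Token::NE_STRICT:   // the inverted equality condition.
        return Condition::kNotEqual;
      case Token::LT:
        return Condition::kLess;
    }
    assert(false);  // All tokens are handled above.
    return Condition::kEqual;
  }
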
diff --git a/src/builtins.cc b/src/builtins.cc
index aaa8bc4..03fac2d 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -273,9 +273,12 @@
     MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
   }
 
-  HEAP_PROFILE(heap, ObjectMoveEvent(elms->address(),
-                                     elms->address() + size_delta,
-                                     elms->Size()));
+  HeapProfiler* profiler = heap->isolate()->heap_profiler();
+  if (profiler->is_profiling()) {
+    profiler->ObjectMoveEvent(elms->address(),
+                              elms->address() + size_delta,
+                              elms->Size());
+  }
   return FixedArrayBase::cast(HeapObject::FromAddress(
       elms->address() + to_trim * entry_size));
 }
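
This call site, like those in src/heap.cc and src/mark-compact.cc below, inlines the HEAP_PROFILE macro that this patch deletes from src/heap-profiler.h; note that the rewritten sites also drop the macro's NULL check on the profiler. A minimal sketch of the before and after shapes, with a hypothetical Profiler type standing in for HeapProfiler:

  // Hypothetical stand-in for v8::internal::HeapProfiler.
  struct Profiler {
    bool is_profiling() const { return profiling_; }
    void ObjectMoveEvent(void* from, void* to, int size) { /* record move */ }
    bool profiling_ = false;
  };

  // Before: HEAP_PROFILE(heap, ObjectMoveEvent(from, to, size)) expanded to
  // a profiler lookup plus NULL and is_profiling() checks hidden in a macro.
  // After: the guard is spelled out at the call site.
  void NotifyMove(Profiler* profiler, void* from, void* to, int size) {
    if (profiler->is_profiling()) {
      profiler->ObjectMoveEvent(from, to, size);
    }
  }
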
diff --git a/src/compiler.cc b/src/compiler.cc
index 01e261a..6d09722 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -555,6 +555,33 @@
 }
 
 
+// Sets the expected number of properties based on the compiler's estimate.
+void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
+                                          int estimate) {
+  // See the comment in SetExpectedNofProperties.
+  if (shared->live_objects_may_exist()) return;
+
+  // If no properties are added in the constructor, they are more likely
+  // to be added later.
+  if (estimate == 0) estimate = 2;
+
+  // TODO(yangguo): check whether those heuristics are still up-to-date.
+  // We do not shrink objects that go into a snapshot (yet), so we adjust
+  // the estimate conservatively.
+  if (Serializer::enabled()) {
+    estimate += 2;
+  } else if (FLAG_clever_optimizations) {
+    // Inobject slack tracking will reclaim redundant inobject space later,
+    // so we can afford to adjust the estimate generously.
+    estimate += 8;
+  } else {
+    estimate += 3;
+  }
+
+  shared->set_expected_nof_properties(estimate);
+}
+
+
 static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
   Isolate* isolate = info->isolate();
   PostponeInterruptsScope postpone(isolate);
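
To make the heuristic above concrete: a constructor that assigns no properties gets its estimate bumped from 0 to 2; with FLAG_clever_optimizations on, 8 slots of slack are then added, so expected_nof_properties ends up at 10 and inobject slack tracking later reclaims whatever goes unused, while a snapshot build would settle for 2 + 2 = 4.
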
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index cc317e5..379ae69 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -313,6 +313,8 @@
 DEFINE_bool(inline_arguments, true, "inline functions with arguments object")
 DEFINE_bool(inline_accessors, true, "inline JavaScript accessors")
 DEFINE_int(loop_weight, 1, "loop weight for representation inference")
+DEFINE_int(escape_analysis_iterations, 1,
+           "maximum number of escape analysis fix-point iterations")
 
 DEFINE_bool(optimize_for_in, true,
             "optimize functions containing for-in loops")
diff --git a/src/handles.cc b/src/handles.cc
index 20fe116..4cb1827 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -150,54 +150,6 @@
 }
 
 
-void SetExpectedNofProperties(Handle<JSFunction> func, int nof) {
-  // If objects constructed from this function exist then changing
-  // 'estimated_nof_properties' is dangerous since the previous value might
-  // have been compiled into the fast construct stub. More over, the inobject
-  // slack tracking logic might have adjusted the previous value, so even
-  // passing the same value is risky.
-  if (func->shared()->live_objects_may_exist()) return;
-
-  func->shared()->set_expected_nof_properties(nof);
-  if (func->has_initial_map()) {
-    Handle<Map> new_initial_map =
-        func->GetIsolate()->factory()->CopyMap(
-            Handle<Map>(func->initial_map()));
-    new_initial_map->set_unused_property_fields(nof);
-    func->set_initial_map(*new_initial_map);
-  }
-}
-
-
-static int ExpectedNofPropertiesFromEstimate(int estimate) {
-  // If no properties are added in the constructor, they are more likely
-  // to be added later.
-  if (estimate == 0) estimate = 2;
-
-  // We do not shrink objects that go into a snapshot (yet), so we adjust
-  // the estimate conservatively.
-  if (Serializer::enabled()) return estimate + 2;
-
-  // Inobject slack tracking will reclaim redundant inobject space later,
-  // so we can afford to adjust the estimate generously.
-  if (FLAG_clever_optimizations) {
-    return estimate + 8;
-  } else {
-    return estimate + 3;
-  }
-}
-
-
-void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
-                                          int estimate) {
-  // See the comment in SetExpectedNofProperties.
-  if (shared->live_objects_may_exist()) return;
-
-  shared->set_expected_nof_properties(
-      ExpectedNofPropertiesFromEstimate(estimate));
-}
-
-
 void FlattenString(Handle<String> string) {
   CALL_HEAP_FUNCTION_VOID(string->GetIsolate(), string->TryFlatten());
 }
diff --git a/src/handles.h b/src/handles.h
index c1400ed..cfdecac 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -299,14 +299,6 @@
 Handle<FixedArray> UnionOfKeys(Handle<FixedArray> first,
                                Handle<FixedArray> second);
 
-// Sets the expected number of properties for the function's instances.
-void SetExpectedNofProperties(Handle<JSFunction> func, int nof);
-
-// Sets the expected number of properties based on estimate from compiler.
-void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
-                                          int estimate);
-
-
 Handle<JSGlobalProxy> ReinitializeJSGlobalProxy(
     Handle<JSFunction> constructor,
     Handle<JSGlobalProxy> global);
diff --git a/src/heap-profiler.h b/src/heap-profiler.h
index 96ae273..7400227 100644
--- a/src/heap-profiler.h
+++ b/src/heap-profiler.h
@@ -37,14 +37,6 @@
 class HeapSnapshot;
 class HeapSnapshotsCollection;
 
-#define HEAP_PROFILE(heap, call)                                             \
-  do {                                                                       \
-    v8::internal::HeapProfiler* profiler = heap->isolate()->heap_profiler(); \
-    if (profiler != NULL && profiler->is_profiling()) {                      \
-      profiler->call;                                                        \
-    }                                                                        \
-  } while (false)
-
 class HeapProfiler {
  public:
   explicit HeapProfiler(Heap* heap);
diff --git a/src/heap.cc b/src/heap.cc
index 76f34b2..83da35c 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -450,6 +450,10 @@
 #endif  // DEBUG
 
   store_buffer()->GCPrologue();
+
+  if (FLAG_concurrent_osr) {
+    isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
+  }
 }
 
 
@@ -2130,9 +2134,12 @@
     if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
       // Update NewSpace stats if necessary.
       RecordCopiedObject(heap, target);
-      HEAP_PROFILE(heap,
-                   ObjectMoveEvent(source->address(), target->address(), size));
       Isolate* isolate = heap->isolate();
+      HeapProfiler* heap_profiler = isolate->heap_profiler();
+      if (heap_profiler->is_profiling()) {
+        heap_profiler->ObjectMoveEvent(source->address(), target->address(),
+                                       size);
+      }
       if (isolate->logger()->is_logging_code_events() ||
           isolate->cpu_profiler()->is_profiling()) {
         if (target->IsSharedFunctionInfo()) {
diff --git a/src/hydrogen-dce.cc b/src/hydrogen-dce.cc
index cba9e42..3b64107 100644
--- a/src/hydrogen-dce.cc
+++ b/src/hydrogen-dce.cc
@@ -97,10 +97,12 @@
     for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
       HInstruction* instr = it.Current();
       if (!instr->CheckFlag(HValue::kIsLive)) {
-        // Instruction has not been marked live; assume it is dead and remove.
-        // TODO(titzer): we don't remove constants because some special ones
-        // might be used by later phases and are assumed to be in the graph
-        if (!instr->IsConstant()) instr->DeleteAndReplaceWith(NULL);
+        // Instruction has not been marked live, so remove it.
+        if (!instr->IsConstant() || instr->block()->block_id() != 0) {
+          // TODO(titzer): Some global constants in block 0 can be used
+          // again later, and can't currently be removed. Fix that.
+          instr->DeleteAndReplaceWith(NULL);
+        }
       } else {
         // Clear the liveness flag to leave the graph clean for the next DCE.
         instr->ClearFlag(HValue::kIsLive);
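
The rewritten branch makes the sweep unconditional except for constants in the entry block, which later phases may still look up. A condensed, freestanding sketch of this sweep step, with a toy Instr type in place of HInstruction:

  #include <vector>

  struct Instr {
    bool live = false;         // set by the preceding marking pass
    bool is_constant = false;
    bool deleted = false;
  };

  // Sweep one block: unlink unmarked instructions, but keep constants in
  // block 0 alive; clear marks on survivors so the next DCE starts clean.
  void Sweep(std::vector<Instr*>& block, int block_id) {
    for (Instr* instr : block) {
      if (!instr->live) {
        if (!instr->is_constant || block_id != 0) instr->deleted = true;
      } else {
        instr->live = false;
      }
    }
  }
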
diff --git a/src/hydrogen-escape-analysis.cc b/src/hydrogen-escape-analysis.cc
index 3a7e10d..1023019 100644
--- a/src/hydrogen-escape-analysis.cc
+++ b/src/hydrogen-escape-analysis.cc
@@ -306,7 +306,7 @@
     number_of_objects_++;
     block_states_.Clear();
 
-    // Perform actual analysis steps.
+    // Perform actual analysis step.
     AnalyzeDataFlow(allocate);
 
     cumulative_values_ += number_of_values_;
@@ -320,8 +320,13 @@
   // TODO(mstarzinger): We disable escape analysis with OSR for now, because
   // spill slots might be uninitialized. Needs investigation.
   if (graph()->has_osr()) return;
-  CollectCapturedValues();
-  PerformScalarReplacement();
+  int max_fixpoint_iteration_count = FLAG_escape_analysis_iterations;
+  for (int i = 0; i < max_fixpoint_iteration_count; i++) {
+    CollectCapturedValues();
+    if (captured_.is_empty()) break;
+    PerformScalarReplacement();
+    captured_.Clear();
+  }
 }
 
 
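Scalar replacement can itself expose new non-escaping allocations, so the phase now re-runs until no captured values remain or the iteration budget from --escape_analysis_iterations is spent. A toy, runnable model of the same fix-point shape (the capture and replace steps are stand-ins, not V8's):

  #include <cstdio>
  #include <vector>

  // Toy model: pretend three allocations are capturable, and that each
  // analyze-and-replace round handles everything it found.
  static int pending = 3;

  static std::vector<int> CollectCaptured() {
    std::vector<int> captured;
    for (int i = 0; i < pending; i++) captured.push_back(i);
    return captured;
  }

  static void PerformScalarReplacement(const std::vector<int>& captured) {
    pending -= static_cast<int>(captured.size());
  }

  int main() {
    const int max_iterations = 2;  // stands in for the new flag
    for (int i = 0; i < max_iterations; i++) {
      std::vector<int> captured = CollectCaptured();
      if (captured.empty()) break;  // fix-point reached early
      PerformScalarReplacement(captured);
    }
    std::printf("allocations left unreplaced: %d\n", pending);
    return 0;
  }
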
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 31630d1..867fa50 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -2396,6 +2396,12 @@
 }
 
 
+void HCapturedObject::PrintDataTo(StringStream* stream) {
+  stream->Add("#%d ", capture_id());
+  HDematerializedObject::PrintDataTo(stream);
+}
+
+
 void HEnterInlined::RegisterReturnTarget(HBasicBlock* return_target,
                                          Zone* zone) {
   ASSERT(return_target->IsInlineReturnTarget());
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index bdeec0c..696d8b0 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -3317,6 +3317,8 @@
   // Replay effects of this instruction on the given environment.
   void ReplayEnvironment(HEnvironment* env);
 
+  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+
   DECLARE_CONCRETE_INSTRUCTION(CapturedObject)
 
  private:
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index a9e1f5a..8adc25c 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -2503,6 +2503,10 @@
     case Token::EQ_STRICT:
       cond = equal;
       break;
+    case Token::NE:
+    case Token::NE_STRICT:
+      cond = not_equal;
+      break;
     case Token::LT:
       cond = is_unsigned ? below : less;
       break;
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 2f8f5d7..8ec1e4b 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -2759,7 +2759,10 @@
                                          Address src,
                                          int size,
                                          AllocationSpace dest) {
-  HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst, size));
+  HeapProfiler* heap_profiler = heap()->isolate()->heap_profiler();
+  if (heap_profiler->is_profiling()) {
+    heap_profiler->ObjectMoveEvent(src, dst, size);
+  }
   ASSERT(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest));
   ASSERT(dest != LO_SPACE && size <= Page::kMaxNonCodeHeapObjectSize);
   if (dest == OLD_POINTER_SPACE) {
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 10477a3..73ecf48 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -2178,6 +2178,10 @@
     case Token::EQ_STRICT:
       cond = eq;
       break;
+    case Token::NE:
+    case Token::NE_STRICT:
+      cond = ne;
+      break;
     case Token::LT:
       cond = is_unsigned ? lo : lt;
       break;
diff --git a/src/objects.cc b/src/objects.cc
index 96a3442..4aef808 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -343,9 +343,10 @@
 }
 
 
-MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
-                                               Object* structure,
-                                               Name* name) {
+Handle<Object> JSObject::GetPropertyWithCallback(Handle<JSObject> object,
+                                                 Handle<Object> receiver,
+                                                 Handle<Object> structure,
+                                                 Handle<Name> name) {
   Isolate* isolate = name->GetIsolate();
   // To accommodate both the old and the new api we switch on the
   // data structure used to store the callbacks.  Eventually foreign
@@ -353,66 +354,71 @@
   if (structure->IsForeign()) {
     AccessorDescriptor* callback =
         reinterpret_cast<AccessorDescriptor*>(
-            Foreign::cast(structure)->foreign_address());
-    MaybeObject* value = (callback->getter)(isolate, receiver, callback->data);
-    RETURN_IF_SCHEDULED_EXCEPTION(isolate);
-    return value;
+            Handle<Foreign>::cast(structure)->foreign_address());
+    CALL_HEAP_FUNCTION(isolate,
+                       (callback->getter)(isolate, *receiver, callback->data),
+                       Object);
   }
 
   // api style callbacks.
   if (structure->IsAccessorInfo()) {
-    if (!AccessorInfo::cast(structure)->IsCompatibleReceiver(receiver)) {
-      Handle<Object> name_handle(name, isolate);
-      Handle<Object> receiver_handle(receiver, isolate);
-      Handle<Object> args[2] = { name_handle, receiver_handle };
+    Handle<AccessorInfo> accessor_info = Handle<AccessorInfo>::cast(structure);
+    if (!accessor_info->IsCompatibleReceiver(*receiver)) {
+      Handle<Object> args[2] = { name, receiver };
       Handle<Object> error =
           isolate->factory()->NewTypeError("incompatible_method_receiver",
                                            HandleVector(args,
                                                         ARRAY_SIZE(args)));
-      return isolate->Throw(*error);
+      isolate->Throw(*error);
+      return Handle<Object>::null();
     }
     // TODO(rossberg): Handling symbols in the API requires changing the API,
     // so we do not support it for now.
-    if (name->IsSymbol()) return isolate->heap()->undefined_value();
+    if (name->IsSymbol()) return isolate->factory()->undefined_value();
     if (structure->IsDeclaredAccessorInfo()) {
-      return GetDeclaredAccessorProperty(receiver,
-                                         DeclaredAccessorInfo::cast(structure),
-                                         isolate);
+      CALL_HEAP_FUNCTION(
+          isolate,
+          GetDeclaredAccessorProperty(*receiver,
+                                      DeclaredAccessorInfo::cast(*structure),
+                                      isolate),
+          Object);
     }
-    ExecutableAccessorInfo* data = ExecutableAccessorInfo::cast(structure);
-    Object* fun_obj = data->getter();
+
+    Handle<ExecutableAccessorInfo> data =
+        Handle<ExecutableAccessorInfo>::cast(structure);
     v8::AccessorGetterCallback call_fun =
-        v8::ToCData<v8::AccessorGetterCallback>(fun_obj);
-    if (call_fun == NULL) return isolate->heap()->undefined_value();
+        v8::ToCData<v8::AccessorGetterCallback>(data->getter());
+    if (call_fun == NULL) return isolate->factory()->undefined_value();
+
     HandleScope scope(isolate);
-    JSObject* self = JSObject::cast(receiver);
-    Handle<String> key(String::cast(name));
-    LOG(isolate, ApiNamedPropertyAccess("load", self, name));
-    PropertyCallbackArguments args(isolate, data->data(), self, this);
+    Handle<JSObject> self = Handle<JSObject>::cast(receiver);
+    Handle<String> key = Handle<String>::cast(name);
+    LOG(isolate, ApiNamedPropertyAccess("load", *self, *name));
+    PropertyCallbackArguments args(isolate, data->data(), *self, *object);
     v8::Handle<v8::Value> result =
         args.Call(call_fun, v8::Utils::ToLocal(key));
-    RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+    RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
     if (result.IsEmpty()) {
-      return isolate->heap()->undefined_value();
+      return isolate->factory()->undefined_value();
     }
-    Object* return_value = *v8::Utils::OpenHandle(*result);
+    Handle<Object> return_value = v8::Utils::OpenHandle(*result);
     return_value->VerifyApiCallResultType();
-    return return_value;
+    return scope.CloseAndEscape(return_value);
   }
 
   // __defineGetter__ callback
-  if (structure->IsAccessorPair()) {
-    Object* getter = AccessorPair::cast(structure)->getter();
-    if (getter->IsSpecFunction()) {
-      // TODO(rossberg): nicer would be to cast to some JSCallable here...
-      return GetPropertyWithDefinedGetter(receiver, JSReceiver::cast(getter));
-    }
-    // Getter is not a function.
-    return isolate->heap()->undefined_value();
+  Handle<Object> getter(Handle<AccessorPair>::cast(structure)->getter(),
+                        isolate);
+  if (getter->IsSpecFunction()) {
+    // TODO(rossberg): it would be nicer to cast to some JSCallable here...
+    CALL_HEAP_FUNCTION(
+        isolate,
+        object->GetPropertyWithDefinedGetter(*receiver,
+                                             JSReceiver::cast(*getter)),
+        Object);
   }
-
-  UNREACHABLE();
-  return NULL;
+  // Getter is not a function.
+  return isolate->factory()->undefined_value();
 }
 
 
@@ -507,19 +513,6 @@
 }
 
 
-// TODO(yangguo): this should eventually replace the non-handlified version.
-Handle<Object> JSObject::GetPropertyWithCallback(Handle<JSObject> object,
-                                                 Handle<Object> receiver,
-                                                 Handle<Object> structure,
-                                                 Handle<Name> name) {
-  CALL_HEAP_FUNCTION(object->GetIsolate(),
-                     object->GetPropertyWithCallback(*receiver,
-                                                     *structure,
-                                                     *name),
-                     Object);
-}
-
-
 // Only deal with CALLBACKS and INTERCEPTOR
 Handle<Object> JSObject::GetPropertyWithFailedAccessCheck(
     Handle<JSObject> object,
@@ -903,9 +896,16 @@
     }
     case CONSTANT:
       return result->GetConstant();
-    case CALLBACKS:
-      return result->holder()->GetPropertyWithCallback(
-          receiver, result->GetCallbackObject(), name);
+    case CALLBACKS: {
+      HandleScope scope(isolate);
+      Handle<Object> value = JSObject::GetPropertyWithCallback(
+          handle(result->holder(), isolate),
+          handle(receiver, isolate),
+          handle(result->GetCallbackObject(), isolate),
+          handle(name, isolate));
+      RETURN_IF_EMPTY_HANDLE(isolate, value);
+      return *value;
+    }
     case HANDLER:
       return result->proxy()->GetPropertyWithHandler(receiver, name);
     case INTERCEPTOR: {
@@ -9381,6 +9381,7 @@
     if (number_of_own_descriptors > 0) {
       TrimDescriptorArray(heap, this, descriptors, number_of_own_descriptors);
       ASSERT(descriptors->number_of_descriptors() == number_of_own_descriptors);
+      set_owns_descriptors(true);
     } else {
       ASSERT(descriptors == GetHeap()->empty_descriptor_array());
     }
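
The recurring transformation in this file: the raw-pointer MaybeObject* method becomes the static, handle-based version (previously a thin wrapper, now the only implementation), so allocations inside callbacks cannot invalidate pointers held across the call, and failure is signaled by a null handle rather than a MaybeObject sentinel. A schematic sketch with toy types, not V8's actual definitions:

  // Toy Handle: one extra indirection that a moving GC could update, so
  // code holding a Handle survives object relocation across allocations.
  template <typename T>
  struct Handle {
    T** location = nullptr;
    T* operator*() const { return *location; }
    bool is_null() const { return location == nullptr; }
    static Handle<T> null() { return Handle<T>(); }
  };

  struct Object {};

  // Old shape: instance method on raw pointers, errors via MaybeObject*.
  //   MaybeObject* GetPropertyWithCallback(Object* receiver, ...);
  // New shape: static method, Handles in and out, errors via a null handle
  // (callers test with is_null() / RETURN_IF_EMPTY_HANDLE).
  Handle<Object> GetProperty(Handle<Object> object, Handle<Object> receiver) {
    if (receiver.is_null()) return Handle<Object>::null();
    // ... work that may allocate, and thus move objects, goes here ...
    return object;  // the handles passed in are still valid afterwards
  }
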
diff --git a/src/objects.h b/src/objects.h
index 6eb2f0b..30c3f63 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -2127,10 +2127,6 @@
                                                 Handle<Object> structure,
                                                 Handle<Name> name);
 
-  MUST_USE_RESULT MaybeObject* GetPropertyWithCallback(Object* receiver,
-                                                       Object* structure,
-                                                       Name* name);
-
   static Handle<Object> SetPropertyWithCallback(
       Handle<JSObject> object,
       Handle<Object> structure,
diff --git a/src/optimizing-compiler-thread.cc b/src/optimizing-compiler-thread.cc
index 0053148..e9c0254 100644
--- a/src/optimizing-compiler-thread.cc
+++ b/src/optimizing-compiler-thread.cc
@@ -37,6 +37,19 @@
 namespace v8 {
 namespace internal {
 
+OptimizingCompilerThread::~OptimizingCompilerThread() {
+  ASSERT_EQ(0, input_queue_length_);
+  DeleteArray(input_queue_);
+  if (FLAG_concurrent_osr) {
+#ifdef DEBUG
+    for (int i = 0; i < osr_buffer_capacity_; i++) {
+      CHECK_EQ(NULL, osr_buffer_[i]);
+    }
+#endif
+    DeleteArray(osr_buffer_);
+  }
+}
+
 
 void OptimizingCompilerThread::Run() {
 #ifdef DEBUG
@@ -93,12 +106,20 @@
 }
 
 
+RecompileJob* OptimizingCompilerThread::NextInput() {
+  LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
+  if (input_queue_length_ == 0) return NULL;
+  RecompileJob* job = input_queue_[InputQueueIndex(0)];
+  ASSERT_NE(NULL, job);
+  input_queue_shift_ = InputQueueIndex(1);
+  input_queue_length_--;
+  return job;
+}
+
+
 void OptimizingCompilerThread::CompileNext() {
-  RecompileJob* job = NULL;
-  bool result = input_queue_.Dequeue(&job);
-  USE(result);
-  ASSERT(result);
-  Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));
+  RecompileJob* job = NextInput();
+  ASSERT_NE(NULL, job);
 
   // The function may have already been optimized by OSR.  Simply continue.
   RecompileJob::Status status = job->OptimizeGraph();
@@ -131,7 +152,7 @@
 
 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
   RecompileJob* job;
-  while (input_queue_.Dequeue(&job)) {
+  while ((job = NextInput())) {
     // This should not block, since we have one signal on the input queue
     // semaphore corresponding to each element in the input queue.
     input_queue_semaphore_.Wait();
@@ -140,7 +161,6 @@
       DisposeRecompileJob(job, restore_function_code);
     }
   }
-  Release_Store(&queue_length_, static_cast<AtomicWord>(0));
 }
 
 
@@ -156,12 +176,12 @@
 
 
 void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
-  RecompileJob* job;
-  for (int i = 0; i < osr_buffer_size_; i++) {
-    job = osr_buffer_[i];
-    if (job != NULL) DisposeRecompileJob(job, restore_function_code);
+  for (int i = 0; i < osr_buffer_capacity_; i++) {
+    if (osr_buffer_[i] != NULL) {
+      DisposeRecompileJob(osr_buffer_[i], restore_function_code);
+      osr_buffer_[i] = NULL;
+    }
   }
-  osr_cursor_ = 0;
 }
 
 
@@ -187,10 +207,9 @@
   stop_semaphore_.Wait();
 
   if (FLAG_concurrent_recompilation_delay != 0) {
-    // Barrier when loading queue length is not necessary since the write
-    // happens in CompileNext on the same thread.
-    // This is used only for testing.
-    while (NoBarrier_Load(&queue_length_) > 0) CompileNext();
+    // At this point the optimizing compiler thread's event loop has stopped.
+    // There is no need for a mutex when reading input_queue_length_.
+    while (input_queue_length_ > 0) CompileNext();
     InstallOptimizedFunctions();
   } else {
     FlushInputQueue(false);
@@ -239,7 +258,6 @@
 void OptimizingCompilerThread::QueueForOptimization(RecompileJob* job) {
   ASSERT(IsQueueAvailable());
   ASSERT(!IsOptimizerThread());
-  Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1));
   CompilationInfo* info = job->info();
   if (info->is_osr()) {
     if (FLAG_trace_concurrent_recompilation) {
@@ -247,13 +265,24 @@
       info->closure()->PrintName();
       PrintF(" for concurrent on-stack replacement.\n");
     }
-    AddToOsrBuffer(job);
     osr_attempts_++;
     BackEdgeTable::AddStackCheck(info);
+    AddToOsrBuffer(job);
+    // Add job to the front of the input queue.
+    LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
+    ASSERT_LT(input_queue_length_, input_queue_capacity_);
+    // Move input_queue_shift_ back by one slot; it becomes the new front.
+    input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1);
+    input_queue_[InputQueueIndex(0)] = job;
+    input_queue_length_++;
   } else {
     info->closure()->MarkInRecompileQueue();
+    // Add job to the back of the input queue.
+    LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
+    ASSERT_LT(input_queue_length_, input_queue_capacity_);
+    input_queue_[InputQueueIndex(input_queue_length_)] = job;
+    input_queue_length_++;
   }
-  input_queue_.Enqueue(job);
   if (FLAG_block_concurrent_recompilation) {
     blocked_jobs_++;
   } else {
@@ -274,15 +303,14 @@
 RecompileJob* OptimizingCompilerThread::FindReadyOSRCandidate(
     Handle<JSFunction> function, uint32_t osr_pc_offset) {
   ASSERT(!IsOptimizerThread());
-  RecompileJob* result = NULL;
-  for (int i = 0; i < osr_buffer_size_; i++) {
-    result = osr_buffer_[i];
-    if (result == NULL) continue;
-    if (result->IsWaitingForInstall() &&
-        result->info()->HasSameOsrEntry(function, osr_pc_offset)) {
+  for (int i = 0; i < osr_buffer_capacity_; i++) {
+    RecompileJob* current = osr_buffer_[i];
+    if (current != NULL &&
+        current->IsWaitingForInstall() &&
+        current->info()->HasSameOsrEntry(function, osr_pc_offset)) {
       osr_hits_++;
       osr_buffer_[i] = NULL;
-      return result;
+      return current;
     }
   }
   return NULL;
@@ -292,10 +320,11 @@
 bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
                                               uint32_t osr_pc_offset) {
   ASSERT(!IsOptimizerThread());
-  for (int i = 0; i < osr_buffer_size_; i++) {
-    if (osr_buffer_[i] != NULL &&
-        osr_buffer_[i]->info()->HasSameOsrEntry(function, osr_pc_offset)) {
-      return !osr_buffer_[i]->IsWaitingForInstall();
+  for (int i = 0; i < osr_buffer_capacity_; i++) {
+    RecompileJob* current = osr_buffer_[i];
+    if (current != NULL &&
+        current->info()->HasSameOsrEntry(function, osr_pc_offset)) {
+      return !current->IsWaitingForInstall();
     }
   }
   return false;
@@ -304,10 +333,10 @@
 
 bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) {
   ASSERT(!IsOptimizerThread());
-  for (int i = 0; i < osr_buffer_size_; i++) {
-    if (osr_buffer_[i] != NULL &&
-        *osr_buffer_[i]->info()->closure() == function) {
-      return !osr_buffer_[i]->IsWaitingForInstall();
+  for (int i = 0; i < osr_buffer_capacity_; i++) {
+    RecompileJob* current = osr_buffer_[i];
+    if (current != NULL && *current->info()->closure() == function) {
+      return !current->IsWaitingForInstall();
     }
   }
   return false;
@@ -316,27 +345,27 @@
 
 void OptimizingCompilerThread::AddToOsrBuffer(RecompileJob* job) {
   ASSERT(!IsOptimizerThread());
-  // Store into next empty slot or replace next stale OSR job that's waiting
-  // in vain.  Dispose in the latter case.
-  RecompileJob* stale;
+  // Find the next slot that is empty or has a stale job.
   while (true) {
-    stale = osr_buffer_[osr_cursor_];
-    if (stale == NULL) break;
-    if (stale->IsWaitingForInstall()) {
-      CompilationInfo* info = stale->info();
-      if (FLAG_trace_osr) {
-        PrintF("[COSR - Discarded ");
-        info->closure()->PrintName();
-        PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
-      }
-      DisposeRecompileJob(stale, false);
-      break;
-    }
-    AdvanceOsrCursor();
+    RecompileJob* stale = osr_buffer_[osr_buffer_cursor_];
+    if (stale == NULL || stale->IsWaitingForInstall()) break;
+    osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
   }
 
-  osr_buffer_[osr_cursor_] = job;
-  AdvanceOsrCursor();
+  // Store the job in the found slot, disposing of any evicted stale job.
+  RecompileJob* evicted = osr_buffer_[osr_buffer_cursor_];
+  if (evicted != NULL) {
+    ASSERT(evicted->IsWaitingForInstall());
+    CompilationInfo* info = evicted->info();
+    if (FLAG_trace_osr) {
+      PrintF("[COSR - Discarded ");
+      info->closure()->PrintName();
+      PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
+    }
+    DisposeRecompileJob(evicted, false);
+  }
+  osr_buffer_[osr_buffer_cursor_] = job;
+  osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
 }
 
 
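AddToOsrBuffer() now does double duty: called with NULL from AgeBufferedOsrJobs() (see src/heap.cc above and the header below), it merely advances the cursor and evicts one stale job, so periodic GCs gradually drain OSR jobs that were never picked up. A toy model of the slot-finding and eviction walk:

  const int kCapacity = 8;

  struct Job {
    bool waiting_for_install = false;  // stale: compiled but never claimed
  };

  Job* buffer[kCapacity] = {nullptr};
  int cursor = 0;

  // Find the next empty-or-stale slot (slots still compiling are skipped;
  // capacity exceeds the number of in-flight jobs, so one always exists),
  // dispose of a stale occupant, store the job -- possibly NULL -- and
  // advance the cursor.
  void AddToBuffer(Job* job) {
    while (buffer[cursor] != nullptr && !buffer[cursor]->waiting_for_install) {
      cursor = (cursor + 1) % kCapacity;
    }
    delete buffer[cursor];  // no-op for an empty slot
    buffer[cursor] = job;
    cursor = (cursor + 1) % kCapacity;
  }
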
diff --git a/src/optimizing-compiler-thread.h b/src/optimizing-compiler-thread.h
index 8992142..754aece 100644
--- a/src/optimizing-compiler-thread.h
+++ b/src/optimizing-compiler-thread.h
@@ -53,22 +53,24 @@
       isolate_(isolate),
       stop_semaphore_(0),
       input_queue_semaphore_(0),
-      osr_cursor_(0),
+      input_queue_capacity_(FLAG_concurrent_recompilation_queue_length),
+      input_queue_length_(0),
+      input_queue_shift_(0),
+      osr_buffer_capacity_(FLAG_concurrent_recompilation_queue_length + 4),
+      osr_buffer_cursor_(0),
       osr_hits_(0),
       osr_attempts_(0),
       blocked_jobs_(0) {
     NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
-    NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0));
+    input_queue_ = NewArray<RecompileJob*>(input_queue_capacity_);
     if (FLAG_concurrent_osr) {
-      osr_buffer_size_ = FLAG_concurrent_recompilation_queue_length + 4;
-      osr_buffer_ = NewArray<RecompileJob*>(osr_buffer_size_);
-      for (int i = 0; i < osr_buffer_size_; i++) osr_buffer_[i] = NULL;
+      // Allocate the OSR buffer and mark all slots as empty.
+      osr_buffer_ = NewArray<RecompileJob*>(osr_buffer_capacity_);
+      for (int i = 0; i < osr_buffer_capacity_; i++) osr_buffer_[i] = NULL;
     }
   }
 
-  ~OptimizingCompilerThread() {
-    if (FLAG_concurrent_osr) DeleteArray(osr_buffer_);
-  }
+  ~OptimizingCompilerThread();
 
   void Run();
   void Stop();
@@ -83,17 +85,15 @@
   bool IsQueuedForOSR(JSFunction* function);
 
   inline bool IsQueueAvailable() {
-    // We don't need a barrier since we have a data dependency right
-    // after.
-    Atomic32 current_length = NoBarrier_Load(&queue_length_);
+    LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
+    return input_queue_length_ < input_queue_capacity_;
+  }
 
-    // This can be queried only from the execution thread.
-    ASSERT(!IsOptimizerThread());
-    // Since only the execution thread increments queue_length_ and
-    // only one thread can run inside an Isolate at one time, a direct
-    // doesn't introduce a race -- queue_length_ may decreased in
-    // meantime, but not increased.
-    return (current_length < FLAG_concurrent_recompilation_queue_length);
+  inline void AgeBufferedOsrJobs() {
+    // Advance the cyclic buffer's cursor to the next empty slot or stale
+    // OSR job, disposing of the stale job in the latter case.  Calling
+    // this on every GC ensures that we do not hold onto stale jobs forever.
+    AddToOsrBuffer(NULL);
   }
 
 #ifdef DEBUG
@@ -107,12 +107,17 @@
   void FlushOutputQueue(bool restore_function_code);
   void FlushOsrBuffer(bool restore_function_code);
   void CompileNext();
+  RecompileJob* NextInput();
 
   // Add a recompilation task for OSR to the cyclic buffer, awaiting OSR entry.
   // Tasks evicted from the cyclic buffer are discarded.
   void AddToOsrBuffer(RecompileJob* compiler);
-  void AdvanceOsrCursor() {
-    osr_cursor_ = (osr_cursor_ + 1) % osr_buffer_size_;
+
+  inline int InputQueueIndex(int i) {
+    int result = (i + input_queue_shift_) % input_queue_capacity_;
+    ASSERT_LE(0, result);
+    ASSERT_LT(result, input_queue_capacity_);
+    return result;
   }
 
 #ifdef DEBUG
@@ -124,20 +129,22 @@
   Semaphore stop_semaphore_;
   Semaphore input_queue_semaphore_;
 
-  // Queue of incoming recompilation tasks (including OSR).
-  UnboundQueue<RecompileJob*> input_queue_;
+  // Circular queue of incoming recompilation tasks (including OSR).
+  RecompileJob** input_queue_;
+  int input_queue_capacity_;
+  int input_queue_length_;
+  int input_queue_shift_;
+  Mutex input_queue_mutex_;
+
   // Queue of recompilation tasks ready to be installed (excluding OSR).
   UnboundQueue<RecompileJob*> output_queue_;
+
   // Cyclic buffer of recompilation tasks for OSR.
-  // TODO(yangguo): This may keep zombie tasks indefinitely, holding on to
-  //                a lot of memory.  Fix this.
   RecompileJob** osr_buffer_;
-  // Cursor for the cyclic buffer.
-  int osr_cursor_;
-  int osr_buffer_size_;
+  int osr_buffer_capacity_;
+  int osr_buffer_cursor_;
 
   volatile AtomicWord stop_thread_;
-  volatile Atomic32 queue_length_;
   TimeDelta time_spent_compiling_;
   TimeDelta time_spent_total_;
 
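The lock-free UnboundQueue and its atomic queue_length_ are replaced by a fixed-capacity ring buffer under a single mutex; the shift index is what lets QueueForOptimization() push OSR jobs to the logical front. A freestanding sketch of the indexing scheme (thread safety omitted, since the mutex side is conventional):

  #include <cassert>

  const int kCapacity = 8;

  int queue[kCapacity];
  int shift = 0;   // physical slot of the logical front
  int length = 0;  // number of queued items

  // Logical index -> physical slot, as in InputQueueIndex().
  int Index(int i) { return (i + shift) % kCapacity; }

  // Ordinary enqueue at the back (non-OSR compile jobs).
  void PushBack(int job) {
    assert(length < kCapacity);
    queue[Index(length++)] = job;
  }

  // Enqueue at the front by moving the front back one slot (OSR jobs).
  void PushFront(int job) {
    assert(length < kCapacity);
    shift = Index(kCapacity - 1);  // shift - 1, wrapped around
    queue[Index(0)] = job;
    length++;
  }

  // Dequeue from the front, as in NextInput().
  int PopFront() {
    assert(length > 0);
    int job = queue[Index(0)];
    shift = Index(1);
    length--;
    return job;
  }
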
diff --git a/src/runtime.cc b/src/runtime.cc
index cbf1705..0b39a43 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -2977,10 +2977,24 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SetExpectedNumberOfProperties) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
   CONVERT_SMI_ARG_CHECKED(num, 1);
   RUNTIME_ASSERT(num >= 0);
-  SetExpectedNofProperties(function, num);
+  // If objects constructed from this function exist then changing
+  // 'estimated_nof_properties' is dangerous since the previous value might
+  // have been compiled into the fast construct stub. Moreover, the inobject
+  // slack tracking logic might have adjusted the previous value, so even
+  // passing the same value is risky.
+  if (!func->shared()->live_objects_may_exist()) {
+    func->shared()->set_expected_nof_properties(num);
+    if (func->has_initial_map()) {
+      Handle<Map> new_initial_map =
+          func->GetIsolate()->factory()->CopyMap(
+              Handle<Map>(func->initial_map()));
+      new_initial_map->set_unused_property_fields(num);
+      func->set_initial_map(*new_initial_map);
+    }
+  }
   return isolate->heap()->undefined_value();
 }
 
@@ -10720,19 +10734,20 @@
     case CALLBACKS: {
       Object* structure = result->GetCallbackObject();
       if (structure->IsForeign() || structure->IsAccessorInfo()) {
-        MaybeObject* maybe_value = result->holder()->GetPropertyWithCallback(
-            receiver, structure, name);
-        if (!maybe_value->ToObject(&value)) {
-          if (maybe_value->IsRetryAfterGC()) return maybe_value;
-          ASSERT(maybe_value->IsException());
-          maybe_value = heap->isolate()->pending_exception();
+        Isolate* isolate = heap->isolate();
+        HandleScope scope(isolate);
+        Handle<Object> value = JSObject::GetPropertyWithCallback(
+            handle(result->holder(), isolate),
+            handle(receiver, isolate),
+            handle(structure, isolate),
+            handle(name, isolate));
+        if (value.is_null()) {
+          MaybeObject* exception = heap->isolate()->pending_exception();
           heap->isolate()->clear_pending_exception();
-          if (caught_exception != NULL) {
-            *caught_exception = true;
-          }
-          return maybe_value;
+          if (caught_exception != NULL) *caught_exception = true;
+          return exception;
         }
-        return value;
+        return *value;
       } else {
         return heap->undefined_value();
       }
diff --git a/src/typing.cc b/src/typing.cc
index c3fd9c0..03c1ad1 100644
--- a/src/typing.cc
+++ b/src/typing.cc
@@ -603,8 +603,10 @@
     case Token::SHR:
       RECURSE(Visit(expr->left()));
       RECURSE(Visit(expr->right()));
-      // TODO(rossberg): we could use an UnsignedSmi as lower bound here...
-      NarrowType(expr, Bounds(Type::Unsigned32(), isolate_));
+      // TODO(rossberg): Ideally the bounds would be UnsignedSmi..Unsigned32,
+      // but there is no 'positive Smi' type, so we widen the lower bound to
+      // Smi and use the smallest union of Smi and Unsigned32 (Number) above.
+      NarrowType(expr, Bounds(Type::Smi(), Type::Number(), isolate_));
       break;
     case Token::ADD: {
       RECURSE(Visit(expr->left()));
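
Concretely: the result of >>> always fits Unsigned32 (for example, -1 >>> 0 evaluates to 4294967295), but small results are Smis, and since the lattice has no non-negative-Smi type the lower bound widens to Smi; the upper bound must then be the smallest supertype of both Smi and Unsigned32, which is Number.
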
diff --git a/src/version.cc b/src/version.cc
index d0ee131..aac2a8c 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
 // system so their names cannot be changed without changing the scripts.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     22
-#define BUILD_NUMBER      13
-#define PATCH_LEVEL       1
+#define BUILD_NUMBER      14
+#define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index b95ac3a..6fe64c5 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -2048,6 +2048,10 @@
     case Token::EQ_STRICT:
       cond = equal;
       break;
+    case Token::NE:
+    case Token::NE_STRICT:
+      cond = not_equal;
+      break;
     case Token::LT:
       cond = is_unsigned ? below : less;
       break;