Version 3.12.10

Re-enabled array bounds check elimination and fixed an issue with it (Chromium issue 132114).

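A hedged, illustrative example (not part of the patch) of the kind of loop this pass targets; the function below is invented. After this change each basic block keeps at most one lower-bound and one upper-bound check per index base and widens them as needed, rather than repeatedly rewriting a single check (see the CoverCheck changes in src/hydrogen.cc below).

  // Illustrative only: the three element loads share the index i, with
  // constant offsets -1, 0 and +1, so one lower-bound and one upper-bound
  // check can cover all of them.
  function sumNeighbors(a) {
    var total = 0;
    for (var i = 1; i < a.length - 1; i++) {
      total += a[i - 1] + a[i] + a[i + 1];
    }
    return total;
  }
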
Fixed Debug::Break crash (Chromium issue 131642).

Added optimizing compiler support for JavaScript getters.

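A hedged sketch (object and function names invented) of what the new support covers: when an optimized load site sees a constant getter in the receiver's map or its prototype chain, Crankshaft now emits a checked direct call to that getter (BuildCallGetter in src/hydrogen.cc below) instead of falling back to a generic load.

  // Illustrative only: a monomorphic load of an accessor property.
  var point = {
    x_: 1,
    get x() { return this.x_; }
  };
  function readX(p) { return p.x; }  // hot load site
  for (var i = 0; i < 100000; i++) readX(point);
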
Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@12029 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
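
Beyond the fixes above, much of the diff below continues the experimental Harmony modules work (module contexts, JSModule instances, Accessors::MakeModuleExport). A hedged sketch of the surface syntax it serves, assuming d8 with the harmony modules flag of that era; the module and export names are invented.

  // Illustrative only: early module syntax, gated behind a harmony flag.
  module M {
    export let answer = 42;
  }
  // Exports are exposed on the module instance object through accessors
  // installed by Accessors::MakeModuleExport; reading an export whose
  // context slot still holds the hole throws a ReferenceError.
  var x = M.answer;
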
diff --git a/src/accessors.cc b/src/accessors.cc
index 8048738..b947399 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -802,4 +802,69 @@
   0
 };
 
+
+//
+// Accessors::MakeModuleExport
+//
+
+static v8::Handle<v8::Value> ModuleGetExport(
+    v8::Local<v8::String> property,
+    const v8::AccessorInfo& info) {
+  JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
+  Context* context = Context::cast(instance->context());
+  ASSERT(context->IsModuleContext());
+  int slot = info.Data()->Int32Value();
+  Object* value = context->get(slot);
+  if (value->IsTheHole()) {
+    Handle<String> name = v8::Utils::OpenHandle(*property);
+    Isolate* isolate = instance->GetIsolate();
+    isolate->ScheduleThrow(
+        *isolate->factory()->NewReferenceError("not_defined",
+                                               HandleVector(&name, 1)));
+    return v8::Handle<v8::Value>();
+  }
+  return v8::Utils::ToLocal(Handle<Object>(value));
+}
+
+
+static void ModuleSetExport(
+    v8::Local<v8::String> property,
+    v8::Local<v8::Value> value,
+    const v8::AccessorInfo& info) {
+  JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
+  Context* context = Context::cast(instance->context());
+  ASSERT(context->IsModuleContext());
+  int slot = info.Data()->Int32Value();
+  Object* old_value = context->get(slot);
+  if (old_value->IsTheHole()) {
+    Handle<String> name = v8::Utils::OpenHandle(*property);
+    Isolate* isolate = instance->GetIsolate();
+    isolate->ScheduleThrow(
+        *isolate->factory()->NewReferenceError("not_defined",
+                                               HandleVector(&name, 1)));
+    return;
+  }
+  context->set(slot, *v8::Utils::OpenHandle(*value));
+}
+
+
+Handle<AccessorInfo> Accessors::MakeModuleExport(
+    Handle<String> name,
+    int index,
+    PropertyAttributes attributes) {
+  Factory* factory = name->GetIsolate()->factory();
+  Handle<AccessorInfo> info = factory->NewAccessorInfo();
+  info->set_property_attributes(attributes);
+  info->set_all_can_read(true);
+  info->set_all_can_write(true);
+  info->set_name(*name);
+  info->set_data(Smi::FromInt(index));
+  Handle<Object> getter = v8::FromCData(&ModuleGetExport);
+  Handle<Object> setter = v8::FromCData(&ModuleSetExport);
+  info->set_getter(*getter);
+  if (!(attributes & ReadOnly)) info->set_setter(*setter);
+  return info;
+}
+
+
 } }  // namespace v8::internal
diff --git a/src/accessors.h b/src/accessors.h
index 36b9a99..250f742 100644
--- a/src/accessors.h
+++ b/src/accessors.h
@@ -85,6 +85,10 @@
                                                       void*);
   static MaybeObject* FunctionGetArguments(Object* object, void*);
 
+  // Accessor infos.
+  static Handle<AccessorInfo> MakeModuleExport(
+      Handle<String> name, int index, PropertyAttributes attributes);
+
  private:
   // Accessor functions only used through the descriptor.
   static MaybeObject* FunctionGetLength(Object* object, void*);
diff --git a/src/ast.cc b/src/ast.cc
index 69ee2fa..07e5003 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -1030,6 +1030,14 @@
     increase_node_count(); \
     add_flag(kDontSelfOptimize); \
   }
+#define DONT_CACHE_NODE(NodeType) \
+  void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
+    increase_node_count(); \
+    add_flag(kDontOptimize); \
+    add_flag(kDontInline); \
+    add_flag(kDontSelfOptimize); \
+    add_flag(kDontCache); \
+  }
 
 REGULAR_NODE(VariableDeclaration)
 REGULAR_NODE(FunctionDeclaration)
@@ -1061,10 +1069,13 @@
 // LOOKUP variables only result from constructs that cannot be inlined anyway.
 REGULAR_NODE(VariableProxy)
 
+// We currently do not optimize any modules. Note in particular, that module
+// instance objects associated with ModuleLiterals are allocated during
+// scope resolution, and references to them are embedded into the code.
+// That code may hence neither be cached nor re-compiled.
 DONT_OPTIMIZE_NODE(ModuleDeclaration)
 DONT_OPTIMIZE_NODE(ImportDeclaration)
 DONT_OPTIMIZE_NODE(ExportDeclaration)
-DONT_OPTIMIZE_NODE(ModuleLiteral)
 DONT_OPTIMIZE_NODE(ModuleVariable)
 DONT_OPTIMIZE_NODE(ModulePath)
 DONT_OPTIMIZE_NODE(ModuleUrl)
@@ -1082,6 +1093,8 @@
 DONT_SELFOPTIMIZE_NODE(ForStatement)
 DONT_SELFOPTIMIZE_NODE(ForInStatement)
 
+DONT_CACHE_NODE(ModuleLiteral)
+
 void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
   increase_node_count();
   if (node->is_jsruntime()) {
@@ -1102,6 +1115,7 @@
 #undef DONT_OPTIMIZE_NODE
 #undef DONT_INLINE_NODE
 #undef DONT_SELFOPTIMIZE_NODE
+#undef DONT_CACHE_NODE
 
 
 Handle<String> Literal::ToString() {
diff --git a/src/ast.h b/src/ast.h
index a2aea52..156b879 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -165,7 +165,8 @@
   kDontInline,
   kDontOptimize,
   kDontSelfOptimize,
-  kDontSoftInline
+  kDontSoftInline,
+  kDontCache
 };
 
 
@@ -586,23 +587,27 @@
  protected:
   template<class> friend class AstNodeFactory;
 
-  ExportDeclaration(VariableProxy* proxy,
-                    Scope* scope)
-      : Declaration(proxy, LET, scope) {
-  }
+  ExportDeclaration(VariableProxy* proxy, Scope* scope)
+      : Declaration(proxy, LET, scope) {}
 };
 
 
 class Module: public AstNode {
  public:
   Interface* interface() const { return interface_; }
+  Block* body() const { return body_; }
 
  protected:
-  explicit Module(Zone* zone) : interface_(Interface::NewModule(zone)) {}
-  explicit Module(Interface* interface) : interface_(interface) {}
+  explicit Module(Zone* zone)
+      : interface_(Interface::NewModule(zone)),
+        body_(NULL) {}
+  explicit Module(Interface* interface, Block* body = NULL)
+      : interface_(interface),
+        body_(body) {}
 
  private:
   Interface* interface_;
+  Block* body_;
 };
 
 
@@ -610,20 +615,10 @@
  public:
   DECLARE_NODE_TYPE(ModuleLiteral)
 
-  Block* body() const { return body_; }
-  Handle<Context> context() const { return context_; }
-
  protected:
   template<class> friend class AstNodeFactory;
 
-  ModuleLiteral(Block* body, Interface* interface)
-      : Module(interface),
-        body_(body) {
-  }
-
- private:
-  Block* body_;
-  Handle<Context> context_;
+  ModuleLiteral(Block* body, Interface* interface) : Module(interface, body) {}
 };
 
 
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 5c87178..5a076d2 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -161,10 +161,6 @@
   // Lookup the code in the (possibly custom) cache.
   bool FindCodeInCache(Code** code_out);
 
- protected:
-  static const int kMajorBits = 6;
-  static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
-
  private:
   // Nonvirtual wrapper around the stub-specific Generate function.  Call
   // this function to set up the macro assembler and generate the code.
@@ -222,8 +218,9 @@
            MajorKeyBits::encode(MajorKey());
   }
 
-  class MajorKeyBits: public BitField<uint32_t, 0, kMajorBits> {};
-  class MinorKeyBits: public BitField<uint32_t, kMajorBits, kMinorBits> {};
+  class MajorKeyBits: public BitField<uint32_t, 0, kStubMajorKeyBits> {};
+  class MinorKeyBits: public BitField<uint32_t,
+      kStubMajorKeyBits, kStubMinorKeyBits> {};  // NOLINT
 
   friend class BreakPointIterator;
 };
@@ -498,7 +495,7 @@
 
   virtual void FinishCode(Handle<Code> code) {
     code->set_compare_state(state_);
-    code->set_compare_operation(op_);
+    code->set_compare_operation(op_ - Token::EQ);
   }
 
   virtual CodeStub::Major MajorKey() { return CompareIC; }
diff --git a/src/compiler.cc b/src/compiler.cc
index e282a7f..6340773 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -543,7 +543,7 @@
       info.SetLanguageMode(FLAG_harmony_scoping ? EXTENDED_MODE : STRICT_MODE);
     }
     result = MakeFunctionInfo(&info);
-    if (extension == NULL && !result.is_null()) {
+    if (extension == NULL && !result.is_null() && !result->dont_cache()) {
       compilation_cache->PutScript(source, result);
     }
   } else {
@@ -602,8 +602,10 @@
       // extended mode.
       ASSERT(language_mode != EXTENDED_MODE ||
              result->is_extended_mode());
-      compilation_cache->PutEval(
-          source, context, is_global, result, scope_position);
+      if (!result->dont_cache()) {
+        compilation_cache->PutEval(
+            source, context, is_global, result, scope_position);
+      }
     }
   } else {
     if (result->ic_age() != HEAP->global_ic_age()) {
@@ -717,6 +719,7 @@
         shared->set_code_age(0);
         shared->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
         shared->set_dont_inline(lit->flags()->Contains(kDontInline));
+        shared->set_dont_cache(lit->flags()->Contains(kDontCache));
         shared->set_ast_node_count(lit->ast_node_count());
 
         if (V8::UseCrankshaft()&&
@@ -830,6 +833,7 @@
   function_info->set_is_function(lit->is_function());
   function_info->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
   function_info->set_dont_inline(lit->flags()->Contains(kDontInline));
+  function_info->set_dont_cache(lit->flags()->Contains(kDontCache));
 }
 
 
diff --git a/src/contexts.cc b/src/contexts.cc
index 7e67125..f4d7733 100644
--- a/src/contexts.cc
+++ b/src/contexts.cc
@@ -306,10 +306,15 @@
 
 
 #ifdef DEBUG
-bool Context::IsBootstrappingOrContext(Object* object) {
+bool Context::IsBootstrappingOrValidParentContext(
+    Object* object, Context* child) {
   // During bootstrapping we allow all objects to pass as
   // contexts. This is necessary to fix circular dependencies.
-  return Isolate::Current()->bootstrapper()->IsActive() || object->IsContext();
+  if (Isolate::Current()->bootstrapper()->IsActive()) return true;
+  if (!object->IsContext()) return false;
+  Context* context = Context::cast(object);
+  return context->IsGlobalContext() || context->IsModuleContext() ||
+         !child->IsModuleContext();
 }
 
 
diff --git a/src/contexts.h b/src/contexts.h
index d154b82..e0d8a14 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -190,6 +190,8 @@
 //                Dynamically declared variables/functions are also added
 //                to lazily allocated extension object. Context::Lookup
 //                searches the extension object for properties.
+//                For block contexts, contains the respective ScopeInfo.
+//                For module contexts, points back to the respective JSModule.
 //
 // [ global    ]  A pointer to the global object. Provided for quick
 //                access to the global object from inside the code (since
@@ -217,7 +219,7 @@
     // The extension slot is used for either the global object (in global
     // contexts), eval extension object (function contexts), subject of with
     // (with contexts), or the variable name (catch contexts), the serialized
-    // scope info (block contexts).
+    // scope info (block contexts), or the module instance (module contexts).
     EXTENSION_INDEX,
     GLOBAL_INDEX,
     MIN_CONTEXT_SLOTS,
@@ -303,7 +305,7 @@
 
   Context* previous() {
     Object* result = unchecked_previous();
-    ASSERT(IsBootstrappingOrContext(result));
+    ASSERT(IsBootstrappingOrValidParentContext(result, this));
     return reinterpret_cast<Context*>(result);
   }
   void set_previous(Context* context) { set(PREVIOUS_INDEX, context); }
@@ -312,6 +314,9 @@
   Object* extension() { return get(EXTENSION_INDEX); }
   void set_extension(Object* object) { set(EXTENSION_INDEX, object); }
 
+  JSModule* module() { return JSModule::cast(get(EXTENSION_INDEX)); }
+  void set_module(JSModule* module) { set(EXTENSION_INDEX, module); }
+
   // Get the context where var declarations will be hoisted to, which
   // may be the context itself.
   Context* declaration_context();
@@ -426,7 +431,7 @@
 
 #ifdef DEBUG
   // Bootstrapping-aware type checks.
-  static bool IsBootstrappingOrContext(Object* object);
+  static bool IsBootstrappingOrValidParentContext(Object* object, Context* kid);
   static bool IsBootstrappingOrGlobalObject(Object* object);
 #endif
 };
diff --git a/src/debug.cc b/src/debug.cc
index 40ddabb..da92815 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -892,27 +892,6 @@
 }
 
 
-// TODO(131642): Remove this when fixed.
-void Debug::PutValuesOnStackAndDie(int start,
-                                   Address c_entry_fp,
-                                   Address last_fp,
-                                   Address larger_fp,
-                                   int count,
-                                   char* stack,
-                                   int end) {
-  OS::PrintError("start:       %d\n", start);
-  OS::PrintError("c_entry_fp:  %p\n", static_cast<void*>(c_entry_fp));
-  OS::PrintError("last_fp:     %p\n", static_cast<void*>(last_fp));
-  OS::PrintError("larger_fp:   %p\n", static_cast<void*>(larger_fp));
-  OS::PrintError("count:       %d\n", count);
-  if (stack != NULL) {
-    OS::PrintError("stack:       %s\n", stack);
-  }
-  OS::PrintError("end:         %d\n", end);
-  OS::Abort();
-}
-
-
 Object* Debug::Break(Arguments args) {
   Heap* heap = isolate_->heap();
   HandleScope scope(isolate_);
@@ -1010,53 +989,16 @@
         it.Advance();
       }
 
-      // TODO(131642): Remove this when fixed.
-      // Catch the cases that would lead to crashes and capture
-      // - C entry FP at which to start stack crawl.
-      // - FP of the frame at which we plan to stop stepping out (last FP).
-      // - current FP that's larger than last FP.
-      // - Counter for the number of steps to step out.
-      // - stack trace string.
-      if (it.done()) {
-        // We crawled the entire stack, never reaching last_fp_.
-        Handle<String> stack = isolate_->StackTraceString();
-        char buffer[8192];
-        int length = Min(8192, stack->length());
-        String::WriteToFlat(*stack, buffer, 0, length - 1);
-        PutValuesOnStackAndDie(0xBEEEEEEE,
-                               frame->fp(),
-                               thread_local_.last_fp_,
-                               reinterpret_cast<Address>(0xDEADDEAD),
-                               count,
-                               buffer,
-                               0xCEEEEEEE);
-      } else if (it.frame()->fp() != thread_local_.last_fp_) {
-        // We crawled over last_fp_, without getting a match.
-        Handle<String> stack = isolate_->StackTraceString();
-        char buffer[8192];
-        int length = Min(8192, stack->length());
-        String::WriteToFlat(*stack, buffer, 0, length - 1);
-        PutValuesOnStackAndDie(0xDEEEEEEE,
-                               frame->fp(),
-                               thread_local_.last_fp_,
-                               it.frame()->fp(),
-                               count,
-                               buffer,
-                               0xFEEEEEEE);
+      // Check that we indeed found the frame we are looking for.
+      CHECK(!it.done() && (it.frame()->fp() == thread_local_.last_fp_));
+      if (step_count > 1) {
+        // Save old count and action to continue stepping after StepOut.
+        thread_local_.queued_step_count_ = step_count - 1;
       }
 
-      // If we found original frame
-      if (it.frame()->fp() == thread_local_.last_fp_) {
-        if (step_count > 1) {
-          // Save old count and action to continue stepping after
-          // StepOut
-          thread_local_.queued_step_count_ = step_count - 1;
-        }
-
-        // Set up for StepOut to reach target frame
-        step_action = StepOut;
-        step_count = count;
-      }
+      // Set up for StepOut to reach target frame.
+      step_action = StepOut;
+      step_count = count;
     }
 
     // Clear all current stepping setup.
diff --git a/src/debug.h b/src/debug.h
index f3215c9..bb80420 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -232,14 +232,6 @@
   void PreemptionWhileInDebugger();
   void Iterate(ObjectVisitor* v);
 
-  // TODO(131642): Remove this when fixed.
-  NO_INLINE(void PutValuesOnStackAndDie(int start,
-                                        Address c_entry_fp,
-                                        Address last_fp,
-                                        Address larger_fp,
-                                        int count,
-                                        char* stack,
-                                        int end));
   Object* Break(Arguments args);
   void SetBreakPoint(Handle<JSFunction> function,
                      Handle<Object> break_point_object,
diff --git a/src/execution.cc b/src/execution.cc
index 5618975..40ed7de 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -132,6 +132,12 @@
         V8::FatalProcessOutOfMemory("JS", true);
       }
     }
+#ifdef ENABLE_DEBUGGER_SUPPORT
+    // Reset stepping state when script exits with uncaught exception.
+    if (isolate->debugger()->IsDebuggerActive()) {
+      isolate->debug()->ClearStepping();
+    }
+#endif  // ENABLE_DEBUGGER_SUPPORT
     return Handle<Object>();
   } else {
     isolate->clear_pending_message();
diff --git a/src/factory.cc b/src/factory.cc
index 3dd7a51..e7c612b 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -293,11 +293,10 @@
 }
 
 
-Handle<Context> Factory::NewModuleContext(Handle<Context> previous,
-                                          Handle<ScopeInfo> scope_info) {
+Handle<Context> Factory::NewModuleContext(Handle<ScopeInfo> scope_info) {
   CALL_HEAP_FUNCTION(
       isolate(),
-      isolate()->heap()->AllocateModuleContext(*previous, *scope_info),
+      isolate()->heap()->AllocateModuleContext(*scope_info),
       Context);
 }
 
@@ -976,10 +975,11 @@
 }
 
 
-Handle<JSModule> Factory::NewJSModule() {
+Handle<JSModule> Factory::NewJSModule(Handle<Context> context,
+                                      Handle<ScopeInfo> scope_info) {
   CALL_HEAP_FUNCTION(
       isolate(),
-      isolate()->heap()->AllocateJSModule(), JSModule);
+      isolate()->heap()->AllocateJSModule(*context, *scope_info), JSModule);
 }
 
 
diff --git a/src/factory.h b/src/factory.h
index bb43545..0485fdd 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -163,8 +163,7 @@
   Handle<Context> NewGlobalContext();
 
   // Create a module context.
-  Handle<Context> NewModuleContext(Handle<Context> previous,
-                                   Handle<ScopeInfo> scope_info);
+  Handle<Context> NewModuleContext(Handle<ScopeInfo> scope_info);
 
   // Create a function context.
   Handle<Context> NewFunctionContext(int length, Handle<JSFunction> function);
@@ -267,7 +266,8 @@
   Handle<JSObject> NewJSObjectFromMap(Handle<Map> map);
 
   // JS modules are pretenured.
-  Handle<JSModule> NewJSModule();
+  Handle<JSModule> NewJSModule(Handle<Context> context,
+                               Handle<ScopeInfo> scope_info);
 
   // JS arrays are pretenured when allocated by the parser.
   Handle<JSArray> NewJSArray(
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 98a4fce..607b0c3 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -198,7 +198,7 @@
 DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
 DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
 DEFINE_bool(use_osr, true, "use on-stack replacement")
-DEFINE_bool(array_bounds_checks_elimination, false,
+DEFINE_bool(array_bounds_checks_elimination, true,
             "perform array bounds checks elimination")
 DEFINE_bool(array_index_dehoisting, false,
             "perform array index dehoisting")
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index e286b80..de32be1 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -589,27 +589,20 @@
 
 
 void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
-  Handle<JSModule> instance = module->interface()->Instance();
-  ASSERT(!instance.is_null());
-
   // Allocate a module context statically.
   Block* block = module->body();
   Scope* saved_scope = scope();
   scope_ = block->scope();
-  Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
+  Interface* interface = module->interface();
+  Handle<JSModule> instance = interface->Instance();
 
-  // Generate code for module creation and linking.
   Comment cmnt(masm_, "[ ModuleLiteral");
   SetStatementPosition(block);
 
-  if (scope_info->HasContext()) {
-    // Set up module context.
-    __ Push(scope_info);
-    __ Push(instance);
-    __ CallRuntime(Runtime::kPushModuleContext, 2);
-    StoreToFrameField(
-        StandardFrameConstants::kContextOffset, context_register());
-  }
+  // Set up module context.
+  __ Push(instance);
+  __ CallRuntime(Runtime::kPushModuleContext, 1);
+  StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
 
   {
     Comment cmnt(masm_, "[ Declarations");
@@ -617,42 +610,21 @@
   }
 
   scope_ = saved_scope;
-  if (scope_info->HasContext()) {
-    // Pop module context.
-    LoadContextField(context_register(), Context::PREVIOUS_INDEX);
-    // Update local stack frame context field.
-    StoreToFrameField(
-        StandardFrameConstants::kContextOffset, context_register());
-  }
-
-  // Populate module instance object.
-  const PropertyAttributes attr =
-      static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE | DONT_ENUM);
-  for (Interface::Iterator it = module->interface()->iterator();
-       !it.done(); it.Advance()) {
-    if (it.interface()->IsModule()) {
-      Handle<Object> value = it.interface()->Instance();
-      ASSERT(!value.is_null());
-      JSReceiver::SetProperty(instance, it.name(), value, attr, kStrictMode);
-    } else {
-      // TODO(rossberg): set proper getters instead of undefined...
-      // instance->DefineAccessor(*it.name(), ACCESSOR_GETTER, *getter, attr);
-      Handle<Object> value(isolate()->heap()->undefined_value());
-      JSReceiver::SetProperty(instance, it.name(), value, attr, kStrictMode);
-    }
-  }
-  USE(instance->PreventExtensions());
+  // Pop module context.
+  LoadContextField(context_register(), Context::PREVIOUS_INDEX);
+  // Update local stack frame context field.
+  StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
 }
 
 
 void FullCodeGenerator::VisitModuleVariable(ModuleVariable* module) {
-  // Noting to do.
+  // Nothing to do.
   // The instance object is resolved statically through the module's interface.
 }
 
 
 void FullCodeGenerator::VisitModulePath(ModulePath* module) {
-  // Noting to do.
+  // Nothing to do.
   // The instance object is resolved statically through the module's interface.
 }
 
@@ -916,25 +888,36 @@
   Scope* saved_scope = scope();
   // Push a block context when entering a block with block scoped variables.
   if (stmt->scope() != NULL) {
-    { Comment cmnt(masm_, "[ Extend block context");
-      scope_ = stmt->scope();
-      Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
-      int heap_slots = scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
-      __ Push(scope_info);
-      PushFunctionArgumentForContextAllocation();
-      if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) {
-        FastNewBlockContextStub stub(heap_slots);
-        __ CallStub(&stub);
-      } else {
-        __ CallRuntime(Runtime::kPushBlockContext, 2);
-      }
+    scope_ = stmt->scope();
+    if (scope_->is_module_scope()) {
+      // If this block is a module body, then we have already allocated and
+      // initialized the declarations earlier. Just push the context.
+      ASSERT(!scope_->interface()->Instance().is_null());
+      __ Push(scope_->interface()->Instance());
+      __ CallRuntime(Runtime::kPushModuleContext, 1);
+      StoreToFrameField(
+          StandardFrameConstants::kContextOffset, context_register());
+    } else {
+      { Comment cmnt(masm_, "[ Extend block context");
+        Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
+        int heap_slots =
+            scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
+        __ Push(scope_info);
+        PushFunctionArgumentForContextAllocation();
+        if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) {
+          FastNewBlockContextStub stub(heap_slots);
+          __ CallStub(&stub);
+        } else {
+          __ CallRuntime(Runtime::kPushBlockContext, 2);
+        }
 
-      // Replace the context stored in the frame.
-      StoreToFrameField(StandardFrameConstants::kContextOffset,
-                        context_register());
-    }
-    { Comment cmnt(masm_, "[ Declarations");
-      VisitDeclarations(scope_->declarations());
+        // Replace the context stored in the frame.
+        StoreToFrameField(StandardFrameConstants::kContextOffset,
+                          context_register());
+      }
+      { Comment cmnt(masm_, "[ Declarations");
+        VisitDeclarations(scope_->declarations());
+      }
     }
   }
   PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
diff --git a/src/handles.h b/src/handles.h
index aca8690..6f3044c 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -246,7 +246,7 @@
 // if none exists.
 Handle<JSValue> GetScriptWrapper(Handle<Script> script);
 
-// Script line number computations.
+// Script line number computations. Note that the line number is zero-based.
 void InitScriptLineEnds(Handle<Script> script);
 // For string calculates an array of line end positions. If the string
 // does not end with a new line character, this character may optionally be
diff --git a/src/heap.cc b/src/heap.cc
index 7d655b4..1b4ec81 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -2082,7 +2082,7 @@
   map->set_pre_allocated_property_fields(0);
   map->init_instance_descriptors();
   map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
-  map->init_prototype_transitions(undefined_value());
+  map->init_back_pointer(undefined_value());
   map->set_unused_property_fields(0);
   map->set_bit_field(0);
   map->set_bit_field2(1 << Map::kIsExtensible);
@@ -2221,15 +2221,15 @@
   // Fix the instance_descriptors for the existing maps.
   meta_map()->init_instance_descriptors();
   meta_map()->set_code_cache(empty_fixed_array());
-  meta_map()->init_prototype_transitions(undefined_value());
+  meta_map()->init_back_pointer(undefined_value());
 
   fixed_array_map()->init_instance_descriptors();
   fixed_array_map()->set_code_cache(empty_fixed_array());
-  fixed_array_map()->init_prototype_transitions(undefined_value());
+  fixed_array_map()->init_back_pointer(undefined_value());
 
   oddball_map()->init_instance_descriptors();
   oddball_map()->set_code_cache(empty_fixed_array());
-  oddball_map()->init_prototype_transitions(undefined_value());
+  oddball_map()->init_back_pointer(undefined_value());
 
   // Fix prototype object for existing maps.
   meta_map()->set_prototype(null_value());
@@ -4006,13 +4006,18 @@
 }
 
 
-MaybeObject* Heap::AllocateJSModule() {
+MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
   // Allocate a fresh map. Modules do not have a prototype.
   Map* map;
   MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
   if (!maybe_map->To(&map)) return maybe_map;
   // Allocate the object based on the map.
-  return AllocateJSObjectFromMap(map, TENURED);
+  JSModule* module;
+  MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
+  if (!maybe_module->To(&module)) return maybe_module;
+  module->set_context(context);
+  module->set_scope_info(scope_info);
+  return module;
 }
 
 
@@ -4911,18 +4916,16 @@
 }
 
 
-MaybeObject* Heap::AllocateModuleContext(Context* previous,
-                                         ScopeInfo* scope_info) {
+MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
   Object* result;
   { MaybeObject* maybe_result =
-        AllocateFixedArrayWithHoles(scope_info->ContextLength(), TENURED);
+        AllocateFixedArray(scope_info->ContextLength(), TENURED);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   Context* context = reinterpret_cast<Context*>(result);
   context->set_map_no_write_barrier(module_context_map());
-  context->set_previous(previous);
-  context->set_extension(scope_info);
-  context->set_global(previous->global());
+  // Context links will be set later.
+  context->set_extension(Smi::FromInt(0));
   return context;
 }
 
@@ -4937,7 +4940,7 @@
   context->set_map_no_write_barrier(function_context_map());
   context->set_closure(function);
   context->set_previous(function->context());
-  context->set_extension(NULL);
+  context->set_extension(Smi::FromInt(0));
   context->set_global(function->context()->global());
   return context;
 }
diff --git a/src/heap.h b/src/heap.h
index e9925c2..8d9adb0 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -530,7 +530,8 @@
   MUST_USE_RESULT MaybeObject* AllocateJSObject(
       JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED);
 
-  MUST_USE_RESULT MaybeObject* AllocateJSModule();
+  MUST_USE_RESULT MaybeObject* AllocateJSModule(Context* context,
+                                                ScopeInfo* scope_info);
 
   // Allocate a JSArray with no elements
   MUST_USE_RESULT MaybeObject* AllocateEmptyJSArray(
@@ -824,8 +825,7 @@
   MUST_USE_RESULT MaybeObject* AllocateGlobalContext();
 
   // Allocate a module context.
-  MUST_USE_RESULT MaybeObject* AllocateModuleContext(Context* previous,
-                                                     ScopeInfo* scope_info);
+  MUST_USE_RESULT MaybeObject* AllocateModuleContext(ScopeInfo* scope_info);
 
   // Allocate a function context.
   MUST_USE_RESULT MaybeObject* AllocateFunctionContext(int length,
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 70ed782..283e484 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -1073,6 +1073,11 @@
 }
 
 
+void HCheckPrototypeMaps::PrintDataTo(StringStream* stream) {
+  stream->Add("[receiver_prototype=%p,holder=%p]", *prototype(), *holder());
+}
+
+
 void HCallStub::PrintDataTo(StringStream* stream) {
   stream->Add("%s ",
               CodeStub::MajorName(major_key_, false));
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index e92d747..fb6fe29 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -2281,6 +2281,8 @@
     return Representation::None();
   }
 
+  virtual void PrintDataTo(StringStream* stream);
+
   virtual intptr_t Hashcode() {
     ASSERT(!HEAP->IsAllocationAllowed());
     intptr_t hash = reinterpret_cast<intptr_t>(*prototype());
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index dfde504..3ce76c1 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -3266,7 +3266,8 @@
   int32_t LowerOffset() const { return lower_offset_; }
   int32_t UpperOffset() const { return upper_offset_; }
   HBasicBlock* BasicBlock() const { return basic_block_; }
-  HBoundsCheck* Check() const { return check_; }
+  HBoundsCheck* LowerCheck() const { return lower_check_; }
+  HBoundsCheck* UpperCheck() const { return upper_check_; }
   BoundsCheckBbData* NextInBasicBlock() const { return next_in_bb_; }
   BoundsCheckBbData* FatherInDominatorTree() const { return father_in_dt_; }
 
@@ -3274,76 +3275,85 @@
     return offset >= LowerOffset() && offset <= UpperOffset();
   }
 
-  // This method removes new_check and modifies the current check so that it
-  // also "covers" what new_check covered.
-  // The obvious precondition is that new_check follows Check() in the
-  // same basic block, and that new_offset is not covered (otherwise we
-  // could simply remove new_check).
-  // As a consequence LowerOffset() or UpperOffset() change (the covered
+  bool HasSingleCheck() { return lower_check_ == upper_check_; }
+
+  // The goal of this method is to modify either upper_offset_ or
+  // lower_offset_ so that also new_offset is covered (the covered
   // range grows).
   //
-  // In the general case the check covering the current range should be like
-  // these two checks:
-  // 0 <= Key()->IndexBase() + LowerOffset()
-  // Key()->IndexBase() + UpperOffset() < Key()->Length()
+  // The precondition is that new_check follows UpperCheck() and
+  // LowerCheck() in the same basic block, and that new_offset is not
+  // covered (otherwise we could simply remove new_check).
   //
-  // We can transform the second check like this:
-  // Key()->IndexBase() + LowerOffset() <
-  //     Key()->Length() + (LowerOffset() - UpperOffset())
-  // so we can handle both checks with a single unsigned comparison.
-  //
-  // The bulk of this method changes Check()->index() and Check()->length()
-  // replacing them with new HAdd instructions to perform the transformation
-  // described above.
+  // If HasSingleCheck() is true then new_check is added as "second check"
+  // (either upper or lower; note that HasSingleCheck() becomes false).
+  // Otherwise one of the current checks is modified so that it also covers
+  // new_offset, and new_check is removed.
   void CoverCheck(HBoundsCheck* new_check,
                   int32_t new_offset) {
     ASSERT(new_check->index()->representation().IsInteger32());
+    bool keep_new_check = false;
 
     if (new_offset > upper_offset_) {
       upper_offset_ = new_offset;
+      if (HasSingleCheck()) {
+        keep_new_check = true;
+        upper_check_ = new_check;
+      } else {
+        BuildOffsetAdd(upper_check_,
+                       &added_upper_index_,
+                       &added_upper_offset_,
+                       Key()->IndexBase(),
+                       new_check->index()->representation(),
+                       new_offset);
+        upper_check_->SetOperandAt(0, added_upper_index_);
+      }
     } else if (new_offset < lower_offset_) {
       lower_offset_ = new_offset;
+      if (HasSingleCheck()) {
+        keep_new_check = true;
+        lower_check_ = new_check;
+      } else {
+        BuildOffsetAdd(lower_check_,
+                       &added_lower_index_,
+                       &added_lower_offset_,
+                       Key()->IndexBase(),
+                       new_check->index()->representation(),
+                       new_offset);
+        lower_check_->SetOperandAt(0, added_lower_index_);
+      }
     } else {
       ASSERT(false);
     }
 
-    BuildOffsetAdd(&added_index_,
-                   &added_index_offset_,
-                   Key()->IndexBase(),
-                   new_check->index()->representation(),
-                   lower_offset_);
-    Check()->SetOperandAt(0, added_index_);
-    BuildOffsetAdd(&added_length_,
-                   &added_length_offset_,
-                   Key()->Length(),
-                   new_check->length()->representation(),
-                   lower_offset_ - upper_offset_);
-    Check()->SetOperandAt(1, added_length_);
-
-    new_check->DeleteAndReplaceWith(NULL);
+    if (!keep_new_check) {
+      new_check->DeleteAndReplaceWith(NULL);
+    }
   }
 
   void RemoveZeroOperations() {
-    RemoveZeroAdd(&added_index_, &added_index_offset_);
-    RemoveZeroAdd(&added_length_, &added_length_offset_);
+    RemoveZeroAdd(&added_lower_index_, &added_lower_offset_);
+    RemoveZeroAdd(&added_upper_index_, &added_upper_offset_);
   }
 
   BoundsCheckBbData(BoundsCheckKey* key,
                     int32_t lower_offset,
                     int32_t upper_offset,
                     HBasicBlock* bb,
-                    HBoundsCheck* check,
+                    HBoundsCheck* lower_check,
+                    HBoundsCheck* upper_check,
                     BoundsCheckBbData* next_in_bb,
                     BoundsCheckBbData* father_in_dt)
   : key_(key),
     lower_offset_(lower_offset),
     upper_offset_(upper_offset),
     basic_block_(bb),
-    check_(check),
-    added_index_offset_(NULL),
-    added_index_(NULL),
-    added_length_offset_(NULL),
-    added_length_(NULL),
+    lower_check_(lower_check),
+    upper_check_(upper_check),
+    added_lower_index_(NULL),
+    added_lower_offset_(NULL),
+    added_upper_index_(NULL),
+    added_upper_offset_(NULL),
     next_in_bb_(next_in_bb),
     father_in_dt_(father_in_dt) { }
 
@@ -3352,15 +3362,17 @@
   int32_t lower_offset_;
   int32_t upper_offset_;
   HBasicBlock* basic_block_;
-  HBoundsCheck* check_;
-  HConstant* added_index_offset_;
-  HAdd* added_index_;
-  HConstant* added_length_offset_;
-  HAdd* added_length_;
+  HBoundsCheck* lower_check_;
+  HBoundsCheck* upper_check_;
+  HAdd* added_lower_index_;
+  HConstant* added_lower_offset_;
+  HAdd* added_upper_index_;
+  HConstant* added_upper_offset_;
   BoundsCheckBbData* next_in_bb_;
   BoundsCheckBbData* father_in_dt_;
 
-  void BuildOffsetAdd(HAdd** add,
+  void BuildOffsetAdd(HBoundsCheck* check,
+                      HAdd** add,
                       HConstant** constant,
                       HValue* original_value,
                       Representation representation,
@@ -3369,12 +3381,12 @@
         HConstant(Handle<Object>(Smi::FromInt(new_offset)),
                   Representation::Integer32());
     if (*add == NULL) {
-      new_constant->InsertBefore(Check());
+      new_constant->InsertBefore(check);
       *add = new(BasicBlock()->zone()) HAdd(NULL,
                                             original_value,
                                             new_constant);
       (*add)->AssumeRepresentation(representation);
-      (*add)->InsertBefore(Check());
+      (*add)->InsertBefore(check);
     } else {
       new_constant->InsertBefore(*add);
       (*constant)->DeleteAndReplaceWith(new_constant);
@@ -3447,6 +3459,7 @@
                                                    offset,
                                                    bb,
                                                    check,
+                                                   check,
                                                    bb_data_list,
                                                    NULL);
       *data_p = bb_data_list;
@@ -3465,7 +3478,8 @@
                                                    new_lower_offset,
                                                    new_upper_offset,
                                                    bb,
-                                                   check,
+                                                   data->LowerCheck(),
+                                                   data->UpperCheck(),
                                                    bb_data_list,
                                                    data);
       table->Insert(key, bb_data_list, zone());
@@ -5623,6 +5637,33 @@
 }
 
 
+static void LookupInPrototypes(Handle<Map> map,
+                               Handle<String> name,
+                               LookupResult* lookup) {
+  while (map->prototype()->IsJSObject()) {
+    Handle<JSObject> holder(JSObject::cast(map->prototype()));
+    map = Handle<Map>(holder->map());
+    map->LookupDescriptor(*holder, *name, lookup);
+    if (lookup->IsFound()) return;
+  }
+  lookup->NotFound();
+}
+
+
+HInstruction* HGraphBuilder::BuildCallGetter(HValue* obj,
+                                             Property* expr,
+                                             Handle<Map> map,
+                                             Handle<Object> callback,
+                                             Handle<JSObject> holder) {
+  if (!callback->IsAccessorPair()) return BuildLoadNamedGeneric(obj, expr);
+  Handle<Object> getter(Handle<AccessorPair>::cast(callback)->getter());
+  Handle<JSFunction> function(Handle<JSFunction>::cast(getter));
+  AddCheckConstantFunction(holder, obj, map, true);
+  AddInstruction(new(zone()) HPushArgument(obj));
+  return new(zone()) HCallConstantFunction(function, 1);
+}
+
+
 HInstruction* HGraphBuilder::BuildLoadNamed(HValue* obj,
                                             Property* expr,
                                             Handle<Map> map,
@@ -5640,7 +5681,17 @@
     AddInstruction(HCheckMaps::NewWithTransitions(obj, map, zone()));
     Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*map));
     return new(zone()) HConstant(function, Representation::Tagged());
+  } else if (lookup.IsPropertyCallbacks()) {
+    Handle<Object> callback(lookup.GetValueFromMap(*map));
+    Handle<JSObject> holder;
+    return BuildCallGetter(obj, expr, map, callback, holder);
   } else {
+    LookupInPrototypes(map, name, &lookup);
+    if (lookup.IsPropertyCallbacks()) {
+      Handle<Object> callback(lookup.GetValue());
+      Handle<JSObject> holder(lookup.holder());
+      return BuildCallGetter(obj, expr, map, callback, holder);
+    }
     return BuildLoadNamedGeneric(obj, expr);
   }
 }
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 85977c1..ac19db7 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -1138,6 +1138,11 @@
                                    bool is_store,
                                    bool* has_side_effects);
 
+  HInstruction* BuildCallGetter(HValue* obj,
+                                Property* expr,
+                                Handle<Map> map,
+                                Handle<Object> callback,
+                                Handle<JSObject> holder);
   HInstruction* BuildLoadNamed(HValue* object,
                                Property* prop,
                                Handle<Map> map,
diff --git a/src/ic.cc b/src/ic.cc
index 615f358..bee29f0 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -2589,7 +2589,8 @@
 
 Token::Value CompareIC::ComputeOperation(Code* target) {
   ASSERT(target->major_key() == CodeStub::CompareIC);
-  return static_cast<Token::Value>(target->compare_operation());
+  return static_cast<Token::Value>(
+      target->compare_operation() + Token::EQ);
 }
 
 
@@ -2599,7 +2600,7 @@
     case SMIS: return "SMIS";
     case HEAP_NUMBERS: return "HEAP_NUMBERS";
     case OBJECTS: return "OBJECTS";
-    case KNOWN_OBJECTS: return "OBJECTS";
+    case KNOWN_OBJECTS: return "KNOWN_OBJECTS";
     case SYMBOLS: return "SYMBOLS";
     case STRINGS: return "STRINGS";
     case GENERIC: return "GENERIC";
diff --git a/src/isolate.cc b/src/isolate.cc
index 30c60b6..766c072 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -1136,12 +1136,12 @@
             "Extension or internal compilation error: %s in %s at line %d.\n",
             *String::cast(exception)->ToCString(),
             *String::cast(location->script()->name())->ToCString(),
-            line_number);
+            line_number + 1);
       } else {
         OS::PrintError(
             "Extension or internal compilation error in %s at line %d.\n",
             *String::cast(location->script()->name())->ToCString(),
-            line_number);
+            line_number + 1);
       }
     }
   }
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index a9d0c16..e75dc49 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -1823,24 +1823,6 @@
 
 template <class T>
 void Marker<T>::MarkMapContents(Map* map) {
-  // Mark prototype transitions array but don't push it into marking stack.
-  // This will make references from it weak. We will clean dead prototype
-  // transitions in ClearNonLiveTransitions.
-  Object** proto_trans_slot =
-      HeapObject::RawField(map, Map::kPrototypeTransitionsOrBackPointerOffset);
-  HeapObject* prototype_transitions = HeapObject::cast(*proto_trans_slot);
-  if (prototype_transitions->IsFixedArray()) {
-    mark_compact_collector()->RecordSlot(proto_trans_slot,
-                                         proto_trans_slot,
-                                         prototype_transitions);
-    MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
-    if (!mark.Get()) {
-      mark.Set();
-      MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(),
-                                            prototype_transitions->Size());
-    }
-  }
-
   // Make sure that the back pointer stored either in the map itself or inside
   // its prototype transitions array is marked. Treat pointers in the descriptor
   // array as weak and also mark that array to prevent visiting it later.
@@ -1855,13 +1837,12 @@
 
   // Mark the Object* fields of the Map. Since the descriptor array has been
   // marked already, it is fine that one of these fields contains a pointer
-  // to it. But make sure to skip back pointer and prototype transitions.
+  // to it. But make sure to skip back pointer.
   STATIC_ASSERT(Map::kPointerFieldsEndOffset ==
-      Map::kPrototypeTransitionsOrBackPointerOffset + kPointerSize);
-  Object** start_slot = HeapObject::RawField(
-      map, Map::kPointerFieldsBeginOffset);
-  Object** end_slot = HeapObject::RawField(
-      map, Map::kPrototypeTransitionsOrBackPointerOffset);
+                Map::kBackPointerOffset + kPointerSize);
+  Object** start_slot =
+      HeapObject::RawField(map, Map::kPointerFieldsBeginOffset);
+  Object** end_slot = HeapObject::RawField(map, Map::kBackPointerOffset);
   for (Object** slot = start_slot; slot < end_slot; slot++) {
     Object* obj = *slot;
     if (!obj->NonFailureIsHeapObject()) continue;
@@ -1952,9 +1933,21 @@
   Object** transitions_start = transitions->data_start();
 
   if (transitions->HasElementsTransition()) {
-    mark_compact_collector()->RecordSlot(transitions_start,
-                                         transitions->GetElementsSlot(),
-                                         transitions->elements_transition());
+    mark_compact_collector()->RecordSlot(
+        transitions_start,
+        transitions->GetElementsTransitionSlot(),
+        transitions->elements_transition());
+  }
+
+  if (transitions->HasPrototypeTransitions()) {
+    // Mark prototype transitions array but don't push it into marking stack.
+    // This will make references from it weak. We will clean dead prototype
+    // transitions in ClearNonLiveTransitions.
+    Object** proto_trans_slot = transitions->GetPrototypeTransitionsSlot();
+    HeapObject* prototype_transitions = HeapObject::cast(*proto_trans_slot);
+    base_marker()->MarkObjectWithoutPush(prototype_transitions);
+    mark_compact_collector()->RecordSlot(
+        transitions_start, proto_trans_slot, prototype_transitions);
   }
 
   for (int i = 0; i < transitions->number_of_transitions(); ++i) {
@@ -2545,7 +2538,7 @@
 
 void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
   int number_of_transitions = map->NumberOfProtoTransitions();
-  FixedArray* prototype_transitions = map->prototype_transitions();
+  FixedArray* prototype_transitions = map->GetPrototypeTransitions();
 
   int new_number_of_transitions = 0;
   const int header = Map::kProtoTransitionHeaderSize;
diff --git a/src/messages.js b/src/messages.js
index d91c251..0701c70 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -50,7 +50,12 @@
         try {
           str = ToDetailString(args[arg_num]);
         } catch (e) {
-          str = "#<error>";
+          if (%IsJSModule(args[arg_num]))
+            str = "module";
+          else if (IS_SPEC_OBJECT(args[arg_num]))
+            str = "object";
+          else
+            str = "#<error>";
         }
       }
     }
@@ -251,6 +256,7 @@
       "harmony_const_assign",         ["Assignment to constant variable."],
       "invalid_module_path",          ["Module does not export '", "%0", "', or export is not itself a module"],
       "module_type_error",            ["Module '", "%0", "' used improperly"],
+      "module_export_undefined",      ["Export '", "%0", "' is not defined in module"],
     ];
     var messages = { __proto__ : null };
     for (var i = 0; i < messagesDictionary.length; i += 2) {
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 6d54d3a..e40fe51 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -374,11 +374,9 @@
 
 
 void JSModule::JSModuleVerify() {
-  Object* v = context();
-  if (v->IsHeapObject()) {
-    VerifyHeapPointer(v);
-  }
-  CHECK(v->IsUndefined() || v->IsModuleContext());
+  VerifyObjectField(kContextOffset);
+  VerifyObjectField(kScopeInfoOffset);
+  CHECK(context()->IsUndefined() || context()->IsModuleContext());
 }
 
 
@@ -1008,7 +1006,6 @@
 
 void Map::ZapTransitions() {
   TransitionArray* transition_array = transitions();
-  if (transition_array == NULL) return;
   MemsetPointer(transition_array->data_start(),
                 GetHeap()->the_hole_value(),
                 transition_array->length());
@@ -1016,7 +1013,7 @@
 
 
 void Map::ZapPrototypeTransitions() {
-  FixedArray* proto_transitions = prototype_transitions();
+  FixedArray* proto_transitions = GetPrototypeTransitions();
   MemsetPointer(proto_transitions->data_start(),
                 GetHeap()->the_hole_value(),
                 proto_transitions->length());
diff --git a/src/objects-inl.h b/src/objects-inl.h
index af5e683..06c3405 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -3052,7 +3052,8 @@
          kind() == BINARY_OP_IC ||
          kind() == COMPARE_IC ||
          kind() == TO_BOOLEAN_IC);
-  return READ_BYTE_FIELD(this, kStubMajorKeyOffset);
+  return StubMajorKeyField::decode(
+      READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
 }
 
 
@@ -3063,7 +3064,9 @@
          kind() == COMPARE_IC ||
          kind() == TO_BOOLEAN_IC);
   ASSERT(0 <= major && major < 256);
-  WRITE_BYTE_FIELD(this, kStubMajorKeyOffset, major);
+  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
+  int updated = StubMajorKeyField::update(previous, major);
+  WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
 }
 
 
@@ -3165,39 +3168,50 @@
 
 unsigned Code::stack_slots() {
   ASSERT(kind() == OPTIMIZED_FUNCTION);
-  return READ_UINT32_FIELD(this, kStackSlotsOffset);
+  return StackSlotsField::decode(
+      READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
 }
 
 
 void Code::set_stack_slots(unsigned slots) {
+  CHECK(slots <= (1 << kStackSlotsBitCount));
   ASSERT(kind() == OPTIMIZED_FUNCTION);
-  WRITE_UINT32_FIELD(this, kStackSlotsOffset, slots);
+  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+  int updated = StackSlotsField::update(previous, slots);
+  WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
 }
 
 
 unsigned Code::safepoint_table_offset() {
   ASSERT(kind() == OPTIMIZED_FUNCTION);
-  return READ_UINT32_FIELD(this, kSafepointTableOffsetOffset);
+  return SafepointTableOffsetField::decode(
+      READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
 }
 
 
 void Code::set_safepoint_table_offset(unsigned offset) {
+  CHECK(offset <= (1 << kSafepointTableOffsetBitCount));
   ASSERT(kind() == OPTIMIZED_FUNCTION);
   ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
-  WRITE_UINT32_FIELD(this, kSafepointTableOffsetOffset, offset);
+  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
+  int updated = SafepointTableOffsetField::update(previous, offset);
+  WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
 }
 
 
 unsigned Code::stack_check_table_offset() {
   ASSERT_EQ(FUNCTION, kind());
-  return READ_UINT32_FIELD(this, kStackCheckTableOffsetOffset);
+  return StackCheckTableOffsetField::decode(
+      READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
 }
 
 
 void Code::set_stack_check_table_offset(unsigned offset) {
   ASSERT_EQ(FUNCTION, kind());
   ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
-  WRITE_UINT32_FIELD(this, kStackCheckTableOffsetOffset, offset);
+  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
+  int updated = StackCheckTableOffsetField::update(previous, offset);
+  WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
 }
 
 
@@ -3216,85 +3230,106 @@
 
 byte Code::unary_op_type() {
   ASSERT(is_unary_op_stub());
-  return READ_BYTE_FIELD(this, kUnaryOpTypeOffset);
+  return UnaryOpTypeField::decode(
+      READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
 }
 
 
 void Code::set_unary_op_type(byte value) {
   ASSERT(is_unary_op_stub());
-  WRITE_BYTE_FIELD(this, kUnaryOpTypeOffset, value);
+  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+  int updated = UnaryOpTypeField::update(previous, value);
+  WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
 }
 
 
 byte Code::binary_op_type() {
   ASSERT(is_binary_op_stub());
-  return READ_BYTE_FIELD(this, kBinaryOpTypeOffset);
+  return BinaryOpTypeField::decode(
+      READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
 }
 
 
 void Code::set_binary_op_type(byte value) {
   ASSERT(is_binary_op_stub());
-  WRITE_BYTE_FIELD(this, kBinaryOpTypeOffset, value);
+  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+  int updated = BinaryOpTypeField::update(previous, value);
+  WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
 }
 
 
 byte Code::binary_op_result_type() {
   ASSERT(is_binary_op_stub());
-  return READ_BYTE_FIELD(this, kBinaryOpReturnTypeOffset);
+  return BinaryOpResultTypeField::decode(
+      READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
 }
 
 
 void Code::set_binary_op_result_type(byte value) {
   ASSERT(is_binary_op_stub());
-  WRITE_BYTE_FIELD(this, kBinaryOpReturnTypeOffset, value);
+  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+  int updated = BinaryOpResultTypeField::update(previous, value);
+  WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
 }
 
 
 byte Code::compare_state() {
   ASSERT(is_compare_ic_stub());
-  return READ_BYTE_FIELD(this, kCompareStateOffset);
+  return CompareStateField::decode(
+      READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
 }
 
 
 void Code::set_compare_state(byte value) {
   ASSERT(is_compare_ic_stub());
-  WRITE_BYTE_FIELD(this, kCompareStateOffset, value);
+  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+  int updated = CompareStateField::update(previous, value);
+  WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
 }
 
 
 byte Code::compare_operation() {
   ASSERT(is_compare_ic_stub());
-  return READ_BYTE_FIELD(this, kCompareOperationOffset);
+  return CompareOperationField::decode(
+      READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
 }
 
 
 void Code::set_compare_operation(byte value) {
   ASSERT(is_compare_ic_stub());
-  WRITE_BYTE_FIELD(this, kCompareOperationOffset, value);
+  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+  int updated = CompareOperationField::update(previous, value);
+  WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
 }
 
 
 byte Code::to_boolean_state() {
   ASSERT(is_to_boolean_ic_stub());
-  return READ_BYTE_FIELD(this, kToBooleanTypeOffset);
+  return ToBooleanStateField::decode(
+      READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
 }
 
 
 void Code::set_to_boolean_state(byte value) {
   ASSERT(is_to_boolean_ic_stub());
-  WRITE_BYTE_FIELD(this, kToBooleanTypeOffset, value);
+  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+  int updated = ToBooleanStateField::update(previous, value);
+  WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
 }
 
 
 bool Code::has_function_cache() {
   ASSERT(kind() == STUB);
-  return READ_BYTE_FIELD(this, kHasFunctionCacheOffset) != 0;
+  return HasFunctionCacheField::decode(
+      READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
 }
 
 
 void Code::set_has_function_cache(bool flag) {
   ASSERT(kind() == STUB);
-  WRITE_BYTE_FIELD(this, kHasFunctionCacheOffset, flag);
+  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+  int updated = HasFunctionCacheField::update(previous, flag);
+  WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
 }
 
 
@@ -3489,12 +3524,7 @@
 
 
 Object* Map::GetBackPointer() {
-  Object* object = READ_FIELD(this, kPrototypeTransitionsOrBackPointerOffset);
-  if (object->IsFixedArray()) {
-    return FixedArray::cast(object)->get(kProtoTransitionBackPointerOffset);
-  } else {
-    return object;
-  }
+  return READ_FIELD(this, kBackPointerOffset);
 }
 
 
@@ -3532,9 +3562,9 @@
 }
 
 
-// If the descriptor does not have a transition array, install a new
-// transition array that has room for an element transition.
-static MaybeObject* AllowElementsTransition(Map* map) {
+// If the descriptor is using the empty transition array, install a new empty
+// transition array that will have place for an element transition.
+static MaybeObject* EnsureHasTransitionArray(Map* map) {
   if (map->HasTransitionArray()) return map;
 
   AllowTransitions(map);
@@ -3549,13 +3579,41 @@
 
 
 MaybeObject* Map::set_elements_transition_map(Map* transitioned_map) {
-  MaybeObject* allow_elements = AllowElementsTransition(this);
+  MaybeObject* allow_elements = EnsureHasTransitionArray(this);
   if (allow_elements->IsFailure()) return allow_elements;
   transitions()->set_elements_transition(transitioned_map);
   return this;
 }
 
 
+FixedArray* Map::GetPrototypeTransitions() {
+  if (!HasTransitionArray()) return GetHeap()->empty_fixed_array();
+  if (!transitions()->HasPrototypeTransitions()) {
+    return GetHeap()->empty_fixed_array();
+  }
+  return transitions()->GetPrototypeTransitions();
+}
+
+
+MaybeObject* Map::SetPrototypeTransitions(FixedArray* proto_transitions) {
+  MaybeObject* allow_prototype = EnsureHasTransitionArray(this);
+  if (allow_prototype->IsFailure()) return allow_prototype;
+#ifdef DEBUG
+  if (HasPrototypeTransitions()) {
+    ASSERT(GetPrototypeTransitions() != proto_transitions);
+    ZapPrototypeTransitions();
+  }
+#endif
+  transitions()->SetPrototypeTransitions(proto_transitions);
+  return this;
+}
+
+
+bool Map::HasPrototypeTransitions() {
+  return HasTransitionArray() && transitions()->HasPrototypeTransitions();
+}
+
+
 TransitionArray* Map::transitions() {
   return instance_descriptors()->transitions();
 }
@@ -3588,57 +3646,38 @@
 }
 
 
+void Map::init_back_pointer(Object* undefined) {
+  ASSERT(undefined->IsUndefined());
+  WRITE_FIELD(this, kBackPointerOffset, undefined);
+}
+
+
 void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
   Heap* heap = GetHeap();
   ASSERT(instance_type() >= FIRST_JS_RECEIVER_TYPE);
   ASSERT((value->IsUndefined() && GetBackPointer()->IsMap()) ||
          (value->IsMap() && GetBackPointer()->IsUndefined()));
-  Object* object = READ_FIELD(this, kPrototypeTransitionsOrBackPointerOffset);
-  if (object->IsFixedArray()) {
-    FixedArray::cast(object)->set(
-        kProtoTransitionBackPointerOffset, value, mode);
-  } else {
-    WRITE_FIELD(this, kPrototypeTransitionsOrBackPointerOffset, value);
-    CONDITIONAL_WRITE_BARRIER(
-        heap, this, kPrototypeTransitionsOrBackPointerOffset, value, mode);
-  }
+  WRITE_FIELD(this, kBackPointerOffset, value);
+  CONDITIONAL_WRITE_BARRIER(heap, this, kBackPointerOffset, value, mode);
 }
 
 
-FixedArray* Map::prototype_transitions() {
-  Object* object = READ_FIELD(this, kPrototypeTransitionsOrBackPointerOffset);
-  if (object->IsFixedArray()) {
-    return FixedArray::cast(object);
-  } else {
-    return GetHeap()->empty_fixed_array();
-  }
+// Either a normal transition array, or a transition array whose header has
+// been overwritten with a Smi by an intrusive iterator (see objects.cc).
+TransitionArray* Map::unchecked_transition_array() {
+  ASSERT(HasTransitionArray());
+  Object* object = *HeapObject::RawField(instance_descriptors(),
+                                         DescriptorArray::kTransitionsOffset);
+  ASSERT(!object->IsSmi());
+  TransitionArray* transition_array = static_cast<TransitionArray*>(object);
+  return transition_array;
 }
 
 
-void Map::set_prototype_transitions(FixedArray* value, WriteBarrierMode mode) {
-  Heap* heap = GetHeap();
-  ASSERT(value != heap->empty_fixed_array());
-  value->set(kProtoTransitionBackPointerOffset, GetBackPointer());
-#ifdef DEBUG
-  if (value != prototype_transitions()) {
-    ZapPrototypeTransitions();
-  }
-#endif
-  WRITE_FIELD(this, kPrototypeTransitionsOrBackPointerOffset, value);
-  CONDITIONAL_WRITE_BARRIER(
-      heap, this, kPrototypeTransitionsOrBackPointerOffset, value, mode);
-}
-
-
-void Map::init_prototype_transitions(Object* undefined) {
-  ASSERT(undefined->IsUndefined());
-  WRITE_FIELD(this, kPrototypeTransitionsOrBackPointerOffset, undefined);
-}
-
-
-HeapObject* Map::unchecked_prototype_transitions() {
-  Object* object = READ_FIELD(this, kPrototypeTransitionsOrBackPointerOffset);
-  return reinterpret_cast<HeapObject*>(object);
+HeapObject* Map::UncheckedPrototypeTransitions() {
+  ASSERT(HasTransitionArray());
+  ASSERT(unchecked_transition_array()->HasPrototypeTransitions());
+  return unchecked_transition_array()->UncheckedPrototypeTransitions();
 }
 
 
@@ -3958,6 +3997,7 @@
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_optimize,
                kDontOptimize)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_inline, kDontInline)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_cache, kDontCache)
 
 void SharedFunctionInfo::BeforeVisitingPointers() {
   if (IsInobjectSlackTrackingInProgress()) DetachInitialMap();
@@ -4408,6 +4448,7 @@
 
 
 ACCESSORS(JSModule, context, Object, kContextOffset)
+ACCESSORS(JSModule, scope_info, ScopeInfo, kScopeInfoOffset)
 
 
 JSModule* JSModule::cast(Object* obj) {
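
The accessors above no longer read dedicated byte fields: they all share the
32-bit word at kKindSpecificFlags1Offset and extract their bits through the
BitField classes declared later in objects.h. As a rough, stand-alone sketch of
that pattern (the BitField template below is a simplified stand-in, not V8's
actual header, and the three typedefs only mirror the bit positions given by
the constants in this patch):

  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  // Simplified BitField<type, shift, size> helper in the spirit of the
  // CompareStateField/HasFunctionCacheField/... classes used above.
  template <class T, int shift, int size>
  class BitField {
   public:
    static const uint32_t kMask = ((1u << size) - 1u) << shift;

    static uint32_t encode(T value) {
      return static_cast<uint32_t>(value) << shift;
    }
    static T decode(uint32_t value) {
      return static_cast<T>((value & kMask) >> shift);
    }
    // Replace only this field, leaving the other bits of the word intact;
    // this is what the set_* accessors do via READ/WRITE_UINT32_FIELD.
    static uint32_t update(uint32_t previous, T value) {
      return (previous & ~kMask) | encode(value);
    }
  };

  // Field positions taken from the KindSpecificFlags1 constants in this patch:
  // 24 stack-slot bits, a 3-bit compare state, a 4-bit compare operation.
  typedef BitField<int, 0, 24> StackSlots;
  typedef BitField<int, 24, 3> CompareState;
  typedef BitField<int, 27, 4> CompareOperation;

  int main() {
    uint32_t flags = 0;
    flags = StackSlots::update(flags, 120);
    flags = CompareState::update(flags, 5);
    flags = CompareOperation::update(flags, 9);
    assert(StackSlots::decode(flags) == 120);  // untouched by later updates
    assert(CompareState::decode(flags) == 5);
    assert(CompareOperation::decode(flags) == 9);
    std::printf("packed word: 0x%08x\n", flags);
    return 0;
  }
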
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 679e9dc..c35ed5e 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -457,6 +457,8 @@
   PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
   PrintF(out, " - context = ");
   context()->Print(out);
+  PrintF(out, " - scope_info = ");
+  scope_info()->ShortPrint(out);
   PrintElementsKind(out, this->map()->elements_kind());
   PrintF(out, " {\n");
   PrintProperties(out);
diff --git a/src/objects.cc b/src/objects.cc
index 244e22b..cd2eebf 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -2959,7 +2959,7 @@
       // transition to the target map.
       if (constant_function == *value) {
         self->set_map(transition_map);
-        return this;
+        return constant_function;
       }
       // Otherwise, replace with a map transition to a new map with a FIELD,
       // even if the value is a constant function.
@@ -5147,11 +5147,11 @@
 
   void Start() {
     ASSERT(!IsIterating());
-    if (HasTransitions()) *Header() = Smi::FromInt(0);
+    *Header() = Smi::FromInt(0);
   }
 
   bool IsIterating() {
-    return HasTransitions() && (*Header())->IsSmi();
+    return (*Header())->IsSmi();
   }
 
   Map* Next() {
@@ -5166,23 +5166,17 @@
   }
 
  private:
-  bool HasTransitions() {
-    return proto_trans_->map()->IsSmi() || proto_trans_->IsFixedArray();
-  }
-
   Object** Header() {
     return HeapObject::RawField(proto_trans_, FixedArray::kMapOffset);
   }
 
   int NumberOfTransitions() {
-    ASSERT(HasTransitions());
     FixedArray* proto_trans = reinterpret_cast<FixedArray*>(proto_trans_);
     Object* num = proto_trans->get(Map::kProtoTransitionNumberOfEntriesOffset);
     return Smi::cast(num)->value();
   }
 
   Map* GetTransition(int transitionNumber) {
-    ASSERT(HasTransitions());
     FixedArray* proto_trans = reinterpret_cast<FixedArray*>(proto_trans_);
     return Map::cast(proto_trans->get(IndexFor(transitionNumber)));
   }
@@ -5232,42 +5226,41 @@
     return old_parent;
   }
 
-  // Can either be Smi (no instance descriptors), or a descriptor array with the
-  // header overwritten as a Smi (thus iterating).
-  TransitionArray* MutatedTransitions() {
-    Object* object = *HeapObject::RawField(instance_descriptors(),
-                                           DescriptorArray::kTransitionsOffset);
-    TransitionArray* transition_array = static_cast<TransitionArray*>(object);
-    return transition_array;
-  }
-
   // Start iterating over this map's children, possibly destroying a FixedArray
   // map (see explanation above).
   void ChildIteratorStart() {
     if (HasTransitionArray()) {
+      if (HasPrototypeTransitions()) {
+        IntrusivePrototypeTransitionIterator(GetPrototypeTransitions()).Start();
+      }
+
       IntrusiveMapTransitionIterator(transitions()).Start();
     }
-    IntrusivePrototypeTransitionIterator(
-        unchecked_prototype_transitions()).Start();
   }
 
   // If we have an unvisited child map, return that one and advance. If we have
   // none, return NULL and reset any destroyed FixedArray maps.
   TraversableMap* ChildIteratorNext() {
-    IntrusivePrototypeTransitionIterator
-        proto_iterator(unchecked_prototype_transitions());
-    if (proto_iterator.IsIterating()) {
-      Map* next = proto_iterator.Next();
-      if (next != NULL) return static_cast<TraversableMap*>(next);
-    }
     if (HasTransitionArray()) {
-      IntrusiveMapTransitionIterator
-          transitions_iterator(MutatedTransitions());
-      if (transitions_iterator.IsIterating()) {
-        Map* next = transitions_iterator.Next();
+      TransitionArray* transition_array = unchecked_transition_array();
+
+      if (transition_array->HasPrototypeTransitions()) {
+        HeapObject* proto_transitions =
+            transition_array->UncheckedPrototypeTransitions();
+        IntrusivePrototypeTransitionIterator proto_iterator(proto_transitions);
+        if (proto_iterator.IsIterating()) {
+          Map* next = proto_iterator.Next();
+          if (next != NULL) return static_cast<TraversableMap*>(next);
+        }
+      }
+
+      IntrusiveMapTransitionIterator transition_iterator(transition_array);
+      if (transition_iterator.IsIterating()) {
+        Map* next = transition_iterator.Next();
         if (next != NULL) return static_cast<TraversableMap*>(next);
       }
     }
+
     return NULL;
   }
 };
@@ -7417,7 +7410,9 @@
 
   // If the final transition array does not contain any live transitions, remove
   // the transition array from the map.
-  if (transition_index == 0 && !t->HasElementsTransition()) {
+  if (transition_index == 0 &&
+      !t->HasElementsTransition() &&
+      !t->HasPrototypeTransitions()) {
     return ClearTransitions();
   }
 
@@ -8835,7 +8830,7 @@
 
 
 Map* Map::GetPrototypeTransition(Object* prototype) {
-  FixedArray* cache = prototype_transitions();
+  FixedArray* cache = GetPrototypeTransitions();
   int number_of_transitions = NumberOfProtoTransitions();
   const int proto_offset =
       kProtoTransitionHeaderSize + kProtoTransitionPrototypeOffset;
@@ -8857,7 +8852,7 @@
   // Don't cache prototype transition if this map is shared.
   if (is_shared() || !FLAG_cache_prototype_transitions) return this;
 
-  FixedArray* cache = prototype_transitions();
+  FixedArray* cache = GetPrototypeTransitions();
 
   const int step = kProtoTransitionElementsPerEntry;
   const int header = kProtoTransitionHeaderSize;
@@ -8880,7 +8875,8 @@
       new_cache->set(i + header, cache->get(i + header));
     }
     cache = new_cache;
-    set_prototype_transitions(cache);
+    MaybeObject* set_result = SetPrototypeTransitions(cache);
+    if (set_result->IsFailure()) return set_result;
   }
 
   int last = transitions - 1;
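
The new ChildIteratorStart/ChildIteratorNext above rely on the intrusive
iterators, which keep their position by temporarily overwriting the header word
of the array being walked with a Smi index, so the traversal needs no extra
allocation; IsIterating() simply checks whether that word is currently a Smi.
A toy version of the same trick, with invented names and an explicit tag bit
standing in for the Smi encoding:

  #include <cassert>
  #include <cstdint>

  // The first word of the array doubles as iteration state: normally it holds
  // a pointer (low bit clear, like a HeapObject), during iteration it holds a
  // tagged index (low bit set, like a Smi in this toy encoding).
  struct ToyArray {
    uintptr_t header;
    int length;
    int elements[8];
  };

  inline bool IsTaggedIndex(uintptr_t w) { return (w & 1) != 0; }
  inline uintptr_t TagIndex(int i) {
    return (static_cast<uintptr_t>(i) << 1) | 1;
  }
  inline int UntagIndex(uintptr_t w) { return static_cast<int>(w >> 1); }

  void StartIterating(ToyArray* a) { a->header = TagIndex(0); }

  // Returns the next element, or -1 when done (restoring the saved header).
  int Next(ToyArray* a, uintptr_t saved_header) {
    assert(IsTaggedIndex(a->header));
    int index = UntagIndex(a->header);
    if (index == a->length) {
      a->header = saved_header;  // done: put the real header word back
      return -1;
    }
    a->header = TagIndex(index + 1);
    return a->elements[index];
  }

  int main() {
    int fake_map;  // stands in for the map word the real code overwrites
    ToyArray a = { reinterpret_cast<uintptr_t>(&fake_map), 3, {7, 8, 9} };
    uintptr_t saved = a.header;
    StartIterating(&a);
    int sum = 0;
    for (int v = Next(&a, saved); v != -1; v = Next(&a, saved)) sum += v;
    assert(sum == 24);
    assert(a.header == reinterpret_cast<uintptr_t>(&fake_map));  // restored
    return 0;
  }
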
diff --git a/src/objects.h b/src/objects.h
index c479a14..d7cd80f 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -181,6 +181,8 @@
 // Instance size sentinel for objects of variable size.
 const int kVariableSizeSentinel = 0;
 
+const int kStubMajorKeyBits = 6;
+const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
 
 // All Maps have a field instance_type containing a InstanceType.
 // It describes the type of the instances.
@@ -3522,7 +3524,7 @@
   FOR_EACH_NUMERIC_FIELD(DECL_INDEX)
 #undef DECL_INDEX
 #undef FOR_EACH_NUMERIC_FIELD
-  kVariablePartIndex
+    kVariablePartIndex
   };
 
   // The layout of the variable part of a ScopeInfo is as follows:
@@ -4520,28 +4522,20 @@
   static const int kICAgeOffset =
       kGCMetadataOffset + kPointerSize;
   static const int kFlagsOffset = kICAgeOffset + kIntSize;
-  static const int kKindSpecificFlagsOffset = kFlagsOffset + kIntSize;
-  static const int kKindSpecificFlagsSize = 2 * kIntSize;
+  static const int kKindSpecificFlags1Offset = kFlagsOffset + kIntSize;
+  static const int kKindSpecificFlags2Offset =
+      kKindSpecificFlags1Offset + kIntSize;
 
-  static const int kHeaderPaddingStart = kKindSpecificFlagsOffset +
-      kKindSpecificFlagsSize;
+  static const int kHeaderPaddingStart = kKindSpecificFlags2Offset + kIntSize;
 
   // Add padding to align the instruction start following right after
   // the Code object header.
   static const int kHeaderSize =
       (kHeaderPaddingStart + kCodeAlignmentMask) & ~kCodeAlignmentMask;
 
-  // Byte offsets within kKindSpecificFlagsOffset.
-  static const int kStubMajorKeyOffset = kKindSpecificFlagsOffset;
-  static const int kOptimizableOffset = kKindSpecificFlagsOffset;
-  static const int kStackSlotsOffset = kKindSpecificFlagsOffset;
-  static const int kCheckTypeOffset = kKindSpecificFlagsOffset;
-
-  static const int kUnaryOpTypeOffset = kStubMajorKeyOffset + 1;
-  static const int kBinaryOpTypeOffset = kStubMajorKeyOffset + 1;
-  static const int kCompareStateOffset = kStubMajorKeyOffset + 1;
-  static const int kToBooleanTypeOffset = kStubMajorKeyOffset + 1;
-  static const int kHasFunctionCacheOffset = kStubMajorKeyOffset + 1;
+  // Byte offsets within kKindSpecificFlags1Offset.
+  static const int kOptimizableOffset = kKindSpecificFlags1Offset;
+  static const int kCheckTypeOffset = kKindSpecificFlags1Offset;
 
   static const int kFullCodeFlags = kOptimizableOffset + 1;
   class FullCodeFlagsHasDeoptimizationSupportField:
@@ -4549,16 +4543,9 @@
   class FullCodeFlagsHasDebugBreakSlotsField: public BitField<bool, 1, 1> {};
   class FullCodeFlagsIsCompiledOptimizable: public BitField<bool, 2, 1> {};
 
-  static const int kBinaryOpReturnTypeOffset = kBinaryOpTypeOffset + 1;
-
-  static const int kCompareOperationOffset = kCompareStateOffset + 1;
-
   static const int kAllowOSRAtLoopNestingLevelOffset = kFullCodeFlags + 1;
   static const int kProfilerTicksOffset = kAllowOSRAtLoopNestingLevelOffset + 1;
 
-  static const int kSafepointTableOffsetOffset = kStackSlotsOffset + kIntSize;
-  static const int kStackCheckTableOffsetOffset = kStackSlotsOffset + kIntSize;
-
   // Flags layout.  BitField<type, shift, size>.
   class ICStateField: public BitField<InlineCacheState, 0, 3> {};
   class TypeField: public BitField<StubType, 3, 3> {};
@@ -4567,6 +4554,77 @@
   class ExtraICStateField: public BitField<ExtraICState, 11, 2> {};
   class IsPregeneratedField: public BitField<bool, 13, 1> {};
 
+  // KindSpecificFlags1 layout (STUB and OPTIMIZED_FUNCTION)
+  static const int kStackSlotsFirstBit = 0;
+  static const int kStackSlotsBitCount = 24;
+  static const int kUnaryOpTypeFirstBit =
+      kStackSlotsFirstBit + kStackSlotsBitCount;
+  static const int kUnaryOpTypeBitCount = 3;
+  static const int kBinaryOpTypeFirstBit =
+      kStackSlotsFirstBit + kStackSlotsBitCount;
+  static const int kBinaryOpTypeBitCount = 3;
+  static const int kBinaryOpResultTypeFirstBit =
+      kBinaryOpTypeFirstBit + kBinaryOpTypeBitCount;
+  static const int kBinaryOpResultTypeBitCount = 3;
+  static const int kCompareStateFirstBit =
+      kStackSlotsFirstBit + kStackSlotsBitCount;
+  static const int kCompareStateBitCount = 3;
+  static const int kCompareOperationFirstBit =
+      kCompareStateFirstBit + kCompareStateBitCount;
+  static const int kCompareOperationBitCount = 4;
+  static const int kToBooleanStateFirstBit =
+      kStackSlotsFirstBit + kStackSlotsBitCount;
+  static const int kToBooleanStateBitCount = 8;
+  static const int kHasFunctionCacheFirstBit =
+      kStackSlotsFirstBit + kStackSlotsBitCount;
+  static const int kHasFunctionCacheBitCount = 1;
+
+  STATIC_ASSERT(kStackSlotsFirstBit + kStackSlotsBitCount <= 32);
+  STATIC_ASSERT(kUnaryOpTypeFirstBit + kUnaryOpTypeBitCount <= 32);
+  STATIC_ASSERT(kBinaryOpTypeFirstBit + kBinaryOpTypeBitCount <= 32);
+  STATIC_ASSERT(kBinaryOpResultTypeFirstBit +
+                kBinaryOpResultTypeBitCount <= 32);
+  STATIC_ASSERT(kCompareStateFirstBit + kCompareStateBitCount <= 32);
+  STATIC_ASSERT(kCompareOperationFirstBit + kCompareOperationBitCount <= 32);
+  STATIC_ASSERT(kToBooleanStateFirstBit + kToBooleanStateBitCount <= 32);
+  STATIC_ASSERT(kHasFunctionCacheFirstBit + kHasFunctionCacheBitCount <= 32);
+
+  class StackSlotsField: public BitField<int,
+      kStackSlotsFirstBit, kStackSlotsBitCount> {};  // NOLINT
+  class UnaryOpTypeField: public BitField<int,
+      kUnaryOpTypeFirstBit, kUnaryOpTypeBitCount> {};  // NOLINT
+  class BinaryOpTypeField: public BitField<int,
+      kBinaryOpTypeFirstBit, kBinaryOpTypeBitCount> {};  // NOLINT
+  class BinaryOpResultTypeField: public BitField<int,
+      kBinaryOpResultTypeFirstBit, kBinaryOpResultTypeBitCount> {};  // NOLINT
+  class CompareStateField: public BitField<int,
+      kCompareStateFirstBit, kCompareStateBitCount> {};  // NOLINT
+  class CompareOperationField: public BitField<int,
+      kCompareOperationFirstBit, kCompareOperationBitCount> {};  // NOLINT
+  class ToBooleanStateField: public BitField<int,
+      kToBooleanStateFirstBit, kToBooleanStateBitCount> {};  // NOLINT
+  class HasFunctionCacheField: public BitField<bool,
+      kHasFunctionCacheFirstBit, kHasFunctionCacheBitCount> {};  // NOLINT
+
+  // KindSpecificFlags2 layout (STUB and OPTIMIZED_FUNCTION)
+  static const int kStubMajorKeyFirstBit = 0;
+  static const int kSafepointTableOffsetFirstBit =
+      kStubMajorKeyFirstBit + kStubMajorKeyBits;
+  static const int kSafepointTableOffsetBitCount = 26;
+
+  STATIC_ASSERT(kStubMajorKeyFirstBit + kStubMajorKeyBits <= 32);
+  STATIC_ASSERT(kSafepointTableOffsetFirstBit +
+                kSafepointTableOffsetBitCount <= 32);
+
+  class SafepointTableOffsetField: public BitField<int,
+      kSafepointTableOffsetFirstBit,
+      kSafepointTableOffsetBitCount> {};  // NOLINT
+  class StubMajorKeyField: public BitField<int,
+      kStubMajorKeyFirstBit, kStubMajorKeyBits> {};  // NOLINT
+
+  // KindSpecificFlags2 layout (FUNCTION)
+  class StackCheckTableOffsetField: public BitField<int, 0, 31> {};
+
   // Signed field cannot be encoded using the BitField class.
   static const int kArgumentsCountShift = 14;
   static const int kArgumentsCountMask = ~((1 << kArgumentsCountShift) - 1);
@@ -4806,6 +4864,7 @@
   inline Object* GetBackPointer();
   inline void SetBackPointer(Object* value,
                              WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+  inline void init_back_pointer(Object* undefined);
 
   // [prototype transitions]: cache of prototype transitions.
   // Prototype transition is a transition that happens
@@ -4815,27 +4874,28 @@
-  //    1: back pointer that overlaps with prototype transitions field.
-  //    2 + 2 * i: prototype
-  //    3 + 2 * i: target map
+  //    1 + 2 * i: prototype
+  //    2 + 2 * i: target map
-  DECL_ACCESSORS(prototype_transitions, FixedArray)
+  inline FixedArray* GetPrototypeTransitions();
+  MUST_USE_RESULT inline MaybeObject* SetPrototypeTransitions(
+      FixedArray* prototype_transitions);
+  inline bool HasPrototypeTransitions();
 
-  inline void init_prototype_transitions(Object* undefined);
-  inline HeapObject* unchecked_prototype_transitions();
+  inline HeapObject* UncheckedPrototypeTransitions();
+  inline TransitionArray* unchecked_transition_array();
 
-  static const int kProtoTransitionHeaderSize = 2;
+  static const int kProtoTransitionHeaderSize = 1;
   static const int kProtoTransitionNumberOfEntriesOffset = 0;
-  static const int kProtoTransitionBackPointerOffset = 1;
   static const int kProtoTransitionElementsPerEntry = 2;
   static const int kProtoTransitionPrototypeOffset = 0;
   static const int kProtoTransitionMapOffset = 1;
 
   inline int NumberOfProtoTransitions() {
-    FixedArray* cache = prototype_transitions();
+    FixedArray* cache = GetPrototypeTransitions();
     if (cache->length() == 0) return 0;
     return
         Smi::cast(cache->get(kProtoTransitionNumberOfEntriesOffset))->value();
   }
 
   inline void SetNumberOfProtoTransitions(int value) {
-    FixedArray* cache = prototype_transitions();
+    FixedArray* cache = GetPrototypeTransitions();
     ASSERT(cache->length() != 0);
     cache->set_unchecked(kProtoTransitionNumberOfEntriesOffset,
                          Smi::FromInt(value));
@@ -4995,17 +5056,14 @@
       kConstructorOffset + kPointerSize;
   static const int kCodeCacheOffset =
       kInstanceDescriptorsOrBitField3Offset + kPointerSize;
-  static const int kPrototypeTransitionsOrBackPointerOffset =
-      kCodeCacheOffset + kPointerSize;
-  static const int kPadStart =
-      kPrototypeTransitionsOrBackPointerOffset + kPointerSize;
+  static const int kBackPointerOffset = kCodeCacheOffset + kPointerSize;
+  static const int kPadStart = kBackPointerOffset + kPointerSize;
   static const int kSize = MAP_POINTER_ALIGN(kPadStart);
 
   // Layout of pointer fields. Heap iteration code relies on them
   // being continuously allocated.
   static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset;
-  static const int kPointerFieldsEndOffset =
-      kPrototypeTransitionsOrBackPointerOffset + kPointerSize;
+  static const int kPointerFieldsEndOffset = kBackPointerOffset + kPointerSize;
 
   // Byte offsets within kInstanceSizesOffset.
   static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;
@@ -5565,6 +5623,9 @@
   // Indicates that the function cannot be inlined.
   DECL_BOOLEAN_ACCESSORS(dont_inline)
 
+  // Indicates that code for this function cannot be cached.
+  DECL_BOOLEAN_ACCESSORS(dont_cache)
+
   // Indicates whether or not the code in the shared function support
   // deoptimization.
   inline bool has_deoptimization_support();
@@ -5799,6 +5860,7 @@
     kIsFunction,
     kDontOptimize,
     kDontInline,
+    kDontCache,
     kCompilerHintsCount  // Pseudo entry
   };
 
@@ -5865,6 +5927,9 @@
   // [context]: the context holding the module's locals, or undefined if none.
   DECL_ACCESSORS(context, Object)
 
+  // [scope_info]: Scope info.
+  DECL_ACCESSORS(scope_info, ScopeInfo)
+
   // Casting.
   static inline JSModule* cast(Object* obj);
 
@@ -5881,7 +5946,8 @@
 
   // Layout description.
   static const int kContextOffset = JSObject::kHeaderSize;
-  static const int kSize = kContextOffset + kPointerSize;
+  static const int kScopeInfoOffset = kContextOffset + kPointerSize;
+  static const int kSize = kScopeInfoOffset + kPointerSize;
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSModule);
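
With kProtoTransitionHeaderSize reduced to 1, the prototype transition cache is
a FixedArray whose slot 0 holds the entry count, followed by (prototype, target
map) pairs. A stand-alone sketch of the index arithmetic and of the linear
lookup that Map::GetPrototypeTransition performs over it; the constants match
the ones declared above, while the std::vector stand-in and the function names
are only illustrative:

  #include <cassert>
  #include <cstddef>
  #include <vector>

  const int kHeaderSize = 1;        // slot 0: Smi number of entries
  const int kElementsPerEntry = 2;  // (prototype, target map)
  const int kPrototypeOffset = 0;
  const int kMapOffset = 1;

  inline int PrototypeIndex(int entry) {
    return kHeaderSize + entry * kElementsPerEntry + kPrototypeOffset;
  }
  inline int MapIndex(int entry) {
    return kHeaderSize + entry * kElementsPerEntry + kMapOffset;
  }

  // Scan the pairs and return the cached "map" for a given prototype, or
  // NULL if there is no entry, mirroring the loop shape in objects.cc.
  void* Lookup(const std::vector<void*>& cache, int entries, void* prototype) {
    for (int i = 0; i < entries; i++) {
      if (cache[PrototypeIndex(i)] == prototype) return cache[MapIndex(i)];
    }
    return NULL;
  }

  int main() {
    int proto_a, proto_b, map_a, map_b;  // dummies; only their addresses matter
    std::vector<void*> cache(kHeaderSize + 2 * kElementsPerEntry);
    cache[PrototypeIndex(0)] = &proto_a;  cache[MapIndex(0)] = &map_a;
    cache[PrototypeIndex(1)] = &proto_b;  cache[MapIndex(1)] = &map_b;
    assert(Lookup(cache, 2, &proto_a) == &map_a);
    assert(Lookup(cache, 2, &proto_b) == &map_b);
    assert(Lookup(cache, 2, &map_a) == NULL);
    return 0;
  }
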
diff --git a/src/parser.cc b/src/parser.cc
index e8d20bf..e4efdf7 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -1246,12 +1246,10 @@
 }
 
 
-Block* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
+Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
   // ModuleDeclaration:
   //    'module' Identifier Module
 
-  // Create new block with one expected declaration.
-  Block* block = factory()->NewBlock(NULL, 1, true);
   Handle<String> name = ParseIdentifier(CHECK_OK);
 
 #ifdef DEBUG
@@ -1275,10 +1273,11 @@
   }
 #endif
 
-  // TODO(rossberg): Add initialization statement to block.
-
   if (names) names->Add(name, zone());
-  return block;
+  if (module->body() == NULL)
+    return factory()->NewEmptyStatement();
+  else
+    return module->body();
 }
 
 
@@ -1344,16 +1343,23 @@
   scope->set_end_position(scanner().location().end_pos);
   body->set_scope(scope);
 
-  // Instance objects have to be created ahead of time (before code generation
-  // linking them) because of potentially cyclic references between them.
-  // We create them here, to avoid another pass over the AST.
+  // Check that all exports are bound.
   Interface* interface = scope->interface();
+  for (Interface::Iterator it = interface->iterator();
+       !it.done(); it.Advance()) {
+    if (scope->LocalLookup(it.name()) == NULL) {
+      Handle<String> name(it.name());
+      ReportMessage("module_export_undefined",
+                    Vector<Handle<String> >(&name, 1));
+      *ok = false;
+      return NULL;
+    }
+  }
+
   interface->MakeModule(ok);
-  ASSERT(ok);
-  interface->MakeSingleton(Isolate::Current()->factory()->NewJSModule(), ok);
-  ASSERT(ok);
+  ASSERT(*ok);
   interface->Freeze(ok);
-  ASSERT(ok);
+  ASSERT(*ok);
   return factory()->NewModuleLiteral(body, interface);
 }
 
@@ -1424,10 +1430,12 @@
 
   Module* result = factory()->NewModuleUrl(symbol);
   Interface* interface = result->interface();
-  interface->MakeSingleton(Isolate::Current()->factory()->NewJSModule(), ok);
-  ASSERT(ok);
   interface->Freeze(ok);
-  ASSERT(ok);
+  ASSERT(*ok);
+  // Create a dummy scope to avoid errors as long as the feature isn't finished.
+  Scope* scope = NewScope(top_scope_, MODULE_SCOPE);
+  interface->Unify(scope->interface(), zone(), ok);
+  ASSERT(*ok);
   return result;
 }
 
@@ -1743,7 +1751,7 @@
   Scope* declaration_scope = DeclarationScope(mode);
   Variable* var = NULL;
 
-  // If a function scope exists, then we can statically declare this
+  // If a suitable scope exists, then we can statically declare this
   // variable and also set its mode. In any case, a Declaration node
   // will be added to the scope so that the declaration can be added
   // to the corresponding activation frame at runtime if necessary.
@@ -1751,14 +1759,12 @@
   // to the calling function context.
   // Similarly, strict mode eval scope does not leak variable declarations to
   // the caller's scope so we declare all locals, too.
-  // Also for block scoped let/const bindings the variable can be
-  // statically declared.
   if (declaration_scope->is_function_scope() ||
       declaration_scope->is_strict_or_extended_eval_scope() ||
       declaration_scope->is_block_scope() ||
       declaration_scope->is_module_scope() ||
       declaration->AsModuleDeclaration() != NULL) {
-    // Declare the variable in the function scope.
+    // Declare the variable in the declaration scope.
     var = declaration_scope->LocalLookup(name);
     if (var == NULL) {
       // Declare the name.
diff --git a/src/parser.h b/src/parser.h
index 52d3d03..b510456 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -586,7 +586,7 @@
   void* ParseSourceElements(ZoneList<Statement*>* processor,
                             int end_token, bool is_eval, bool* ok);
   Statement* ParseModuleElement(ZoneStringList* labels, bool* ok);
-  Block* ParseModuleDeclaration(ZoneStringList* names, bool* ok);
+  Statement* ParseModuleDeclaration(ZoneStringList* names, bool* ok);
   Module* ParseModule(bool* ok);
   Module* ParseModuleLiteral(bool* ok);
   Module* ParseModulePath(bool* ok);
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index ea93006..d06b62c 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -2013,16 +2013,6 @@
                          "descriptors", map->instance_descriptors(),
                          Map::kInstanceDescriptorsOrBitField3Offset);
   }
-  if (map->unchecked_prototype_transitions()->IsFixedArray()) {
-    TagObject(map->prototype_transitions(), "(prototype transitions)");
-    SetInternalReference(map, entry,
-                         "prototype_transitions", map->prototype_transitions(),
-                         Map::kPrototypeTransitionsOrBackPointerOffset);
-  } else {
-    SetInternalReference(map, entry,
-                         "back_pointer", map->GetBackPointer(),
-                         Map::kPrototypeTransitionsOrBackPointerOffset);
-  }
   SetInternalReference(map, entry,
                        "code_cache", map->code_cache(),
                        Map::kCodeCacheOffset);
diff --git a/src/runtime.cc b/src/runtime.cc
index 8ed3c71..93479d8 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -4893,7 +4893,9 @@
 // to a built-in function such as Array.forEach.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugCallbackSupportsStepping) {
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  if (!isolate->IsDebuggerActive()) return isolate->heap()->false_value();
+  if (!isolate->IsDebuggerActive() || !isolate->debug()->StepInActive()) {
+    return isolate->heap()->false_value();
+  }
   CONVERT_ARG_CHECKED(Object, callback, 0);
   // We do not step into the callback if it's a builtin or not even a function.
   if (!callback->IsJSFunction() || JSFunction::cast(callback)->IsBuiltin()) {
@@ -8849,19 +8851,25 @@
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSModule) {
+  ASSERT(args.length() == 1);
+  Object* obj = args[0];
+  return isolate->heap()->ToBoolean(obj->IsJSModule());
+}
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_PushModuleContext) {
   NoHandleAllocation ha;
-  ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(ScopeInfo, scope_info, 0);
-  CONVERT_ARG_HANDLE_CHECKED(JSModule, instance, 1);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSModule, instance, 0);
 
-  Context* context;
-  MaybeObject* maybe_context =
-      isolate->heap()->AllocateModuleContext(isolate->context(),
-                                             scope_info);
-  if (!maybe_context->To(&context)) return maybe_context;
-  // Also initialize the context slot of the instance object.
-  instance->set_context(context);
+  Context* context = Context::cast(instance->context());
+  Context* previous = isolate->context();
+  ASSERT(context->IsModuleContext());
+  // Initialize the context links.
+  context->set_previous(previous);
+  context->set_closure(previous->closure());
+  context->set_global(previous->global());
   isolate->set_context(context);
 
   return context;
diff --git a/src/runtime.h b/src/runtime.h
index 3430a58..c5c8bcb 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -283,6 +283,9 @@
   F(CreateArrayLiteral, 3, 1) \
   F(CreateArrayLiteralShallow, 3, 1) \
   \
+  /* Harmony modules */ \
+  F(IsJSModule, 1, 1) \
+  \
   /* Harmony proxies */ \
   F(CreateJSProxy, 2, 1) \
   F(CreateJSFunctionProxy, 4, 1) \
@@ -330,7 +333,7 @@
   F(PushWithContext, 2, 1) \
   F(PushCatchContext, 3, 1) \
   F(PushBlockContext, 2, 1) \
-  F(PushModuleContext, 2, 1) \
+  F(PushModuleContext, 1, 1) \
   F(DeleteContextSlot, 2, 1) \
   F(LoadContextSlot, 2, 2) \
   F(LoadContextSlotNoReferenceError, 2, 2) \
diff --git a/src/scopeinfo.cc b/src/scopeinfo.cc
index 25f02f6..02b4323 100644
--- a/src/scopeinfo.cc
+++ b/src/scopeinfo.cc
@@ -193,7 +193,8 @@
     bool has_context = context_locals > 0 ||
         function_name_context_slot ||
         Type() == WITH_SCOPE ||
-        (Type() == FUNCTION_SCOPE && CallsEval());
+        (Type() == FUNCTION_SCOPE && CallsEval()) ||
+        Type() == MODULE_SCOPE;
     if (has_context) {
       return Context::MIN_CONTEXT_SLOTS + context_locals +
           (function_name_context_slot ? 1 : 0);
@@ -222,11 +223,7 @@
 
 
 bool ScopeInfo::HasContext() {
-  if (length() > 0) {
-    return ContextLength() > 0;
-  } else {
-    return false;
-  }
+  return ContextLength() > 0;
 }
 
 
diff --git a/src/scopes.cc b/src/scopes.cc
index b0fd10e..a5b12f8 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -29,6 +29,7 @@
 
 #include "scopes.h"
 
+#include "accessors.h"
 #include "bootstrapper.h"
 #include "compiler.h"
 #include "messages.h"
@@ -226,6 +227,12 @@
       for (Scope* s = innermost_scope; s != NULL; s = s->outer_scope()) {
         s->scope_inside_with_ = true;
       }
+    } else if (context->IsModuleContext()) {
+      ScopeInfo* scope_info = ScopeInfo::cast(context->module()->scope_info());
+      current_scope = new(zone) Scope(current_scope,
+                                      MODULE_SCOPE,
+                                      Handle<ScopeInfo>(scope_info),
+                                      zone);
     } else if (context->IsFunctionContext()) {
       ScopeInfo* scope_info = context->closure()->shared()->scope_info();
       current_scope = new(zone) Scope(current_scope,
@@ -634,6 +641,12 @@
   // 3) Allocate variables.
   AllocateVariablesRecursively();
 
+  // 4) Allocate and link module instance objects.
+  if (FLAG_harmony_modules && (is_global_scope() || is_module_scope())) {
+    AllocateModules(info);
+    LinkModules(info);
+  }
+
   return true;
 }
 
@@ -1119,7 +1132,8 @@
        inner_scope_calls_eval_ ||
        scope_contains_with_ ||
        is_catch_scope() ||
-       is_block_scope())) {
+       is_block_scope() ||
+       is_module_scope())) {
     var->set_is_used(true);
   }
   // Global variables do not need to be allocated.
@@ -1307,4 +1321,77 @@
       (function_ != NULL && function_->proxy()->var()->IsContextSlot() ? 1 : 0);
 }
 
+
+void Scope::AllocateModules(CompilationInfo* info) {
+  ASSERT(is_global_scope() || is_module_scope());
+
+  if (is_module_scope()) {
+    ASSERT(interface_->IsFrozen());
+    ASSERT(scope_info_.is_null());
+
+    // TODO(rossberg): This has to be the initial compilation of this code.
+    // We currently do not allow recompiling any module definitions.
+    Handle<ScopeInfo> scope_info = GetScopeInfo();
+    Factory* factory = info->isolate()->factory();
+    Handle<Context> context = factory->NewModuleContext(scope_info);
+    Handle<JSModule> instance = factory->NewJSModule(context, scope_info);
+    context->set_module(*instance);
+
+    bool ok;
+    interface_->MakeSingleton(instance, &ok);
+    ASSERT(ok);
+  }
+
+  // Allocate nested modules.
+  for (int i = 0; i < inner_scopes_.length(); i++) {
+    Scope* inner_scope = inner_scopes_.at(i);
+    if (inner_scope->is_module_scope()) {
+      inner_scope->AllocateModules(info);
+    }
+  }
+}
+
+
+void Scope::LinkModules(CompilationInfo* info) {
+  ASSERT(is_global_scope() || is_module_scope());
+
+  if (is_module_scope()) {
+    Handle<JSModule> instance = interface_->Instance();
+
+    // Populate the module instance object.
+    const PropertyAttributes ro_attr =
+        static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE | DONT_ENUM);
+    const PropertyAttributes rw_attr =
+        static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM);
+    for (Interface::Iterator it = interface_->iterator();
+         !it.done(); it.Advance()) {
+      if (it.interface()->IsModule()) {
+        Handle<Object> value = it.interface()->Instance();
+        ASSERT(!value.is_null());
+        JSReceiver::SetProperty(
+            instance, it.name(), value, ro_attr, kStrictMode);
+      } else {
+        Variable* var = LocalLookup(it.name());
+        ASSERT(var != NULL && var->IsContextSlot());
+        PropertyAttributes attr = var->is_const_mode() ? ro_attr : rw_attr;
+        Handle<AccessorInfo> info =
+            Accessors::MakeModuleExport(it.name(), var->index(), attr);
+        Handle<Object> result = SetAccessor(instance, info);
+        ASSERT(!(result.is_null() || result->IsUndefined()));
+        USE(result);
+      }
+    }
+    USE(JSObject::PreventExtensions(instance));
+  }
+
+  // Link nested modules.
+  for (int i = 0; i < inner_scopes_.length(); i++) {
+    Scope* inner_scope = inner_scopes_.at(i);
+    if (inner_scope->is_module_scope()) {
+      inner_scope->LinkModules(info);
+    }
+  }
+}
+
+
 } }  // namespace v8::internal
diff --git a/src/scopes.h b/src/scopes.h
index 42339a9..f8826c8 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -280,7 +280,8 @@
   bool is_block_scope() const { return type_ == BLOCK_SCOPE; }
   bool is_with_scope() const { return type_ == WITH_SCOPE; }
   bool is_declaration_scope() const {
-    return is_eval_scope() || is_function_scope() || is_global_scope();
+    return is_eval_scope() || is_function_scope() ||
+        is_module_scope() || is_global_scope();
   }
   bool is_classic_mode() const {
     return language_mode() == CLASSIC_MODE;
@@ -590,6 +591,13 @@
   bool AllocateVariables(CompilationInfo* info,
                          AstNodeFactory<AstNullVisitor>* factory);
 
+  // Instance objects have to be created ahead of time (before code generation)
+  // because of potentially cyclic references between them.
+  // Linking also has to be a separate stage, since populating one object may
+  // potentially require (forward) references to others.
+  void AllocateModules(CompilationInfo* info);
+  void LinkModules(CompilationInfo* info);
+
  private:
   // Construct a scope based on the scope info.
   Scope(Scope* inner_scope, ScopeType type, Handle<ScopeInfo> scope_info,
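
The comment added to scopes.h above is the heart of the two-pass design:
AllocateModules creates every module instance object first, and LinkModules
populates them afterwards, so that an export can refer to an instance that has
not been filled in yet. A generic sketch of that allocate-then-link pattern on
two mutually referencing records (every name below is invented for the
illustration; none of it is V8 API):

  #include <cassert>
  #include <cstddef>
  #include <map>
  #include <string>
  #include <vector>

  struct ModuleInstance {
    std::string name;
    std::map<std::string, ModuleInstance*> exports;
  };

  struct ModuleDecl {
    std::string name;
    std::vector<std::string> exported_modules;  // modules it re-exports
  };

  // Pass 1: create every instance so forward (possibly cyclic) references
  // always have something to point at.
  void AllocateAll(const std::vector<ModuleDecl>& decls,
                   std::map<std::string, ModuleInstance*>* instances) {
    for (size_t i = 0; i < decls.size(); i++) {
      ModuleInstance* m = new ModuleInstance();
      m->name = decls[i].name;
      (*instances)[decls[i].name] = m;
    }
  }

  // Pass 2: wire the instances together; works even for cycles, because
  // every instance already exists by now.
  void LinkAll(const std::vector<ModuleDecl>& decls,
               std::map<std::string, ModuleInstance*>* instances) {
    for (size_t i = 0; i < decls.size(); i++) {
      ModuleInstance* self = (*instances)[decls[i].name];
      for (size_t j = 0; j < decls[i].exported_modules.size(); j++) {
        const std::string& target = decls[i].exported_modules[j];
        self->exports[target] = (*instances)[target];
      }
    }
  }

  int main() {
    std::vector<ModuleDecl> decls(2);
    decls[0].name = "A";  decls[0].exported_modules.push_back("B");
    decls[1].name = "B";  decls[1].exported_modules.push_back("A");  // a cycle
    std::map<std::string, ModuleInstance*> instances;
    AllocateAll(decls, &instances);
    LinkAll(decls, &instances);
    assert(instances["A"]->exports["B"] == instances["B"]);
    assert(instances["B"]->exports["A"] == instances["A"]);
    for (std::map<std::string, ModuleInstance*>::iterator it =
             instances.begin(); it != instances.end(); ++it) {
      delete it->second;
    }
    return 0;
  }
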
diff --git a/src/transitions-inl.h b/src/transitions-inl.h
index 9061883..517fdc8 100644
--- a/src/transitions-inl.h
+++ b/src/transitions-inl.h
@@ -82,6 +82,42 @@
 }
 
 
+bool TransitionArray::HasPrototypeTransitions() {
+  Object* prototype_transitions = get(kPrototypeTransitionsIndex);
+  return prototype_transitions != Smi::FromInt(0);
+}
+
+
+FixedArray* TransitionArray::GetPrototypeTransitions() {
+  Object* prototype_transitions = get(kPrototypeTransitionsIndex);
+  return FixedArray::cast(prototype_transitions);
+}
+
+
+HeapObject* TransitionArray::UncheckedPrototypeTransitions() {
+  Object* prototype_transitions = get(kPrototypeTransitionsIndex);
+  if (prototype_transitions == Smi::FromInt(0)) return NULL;
+  return reinterpret_cast<HeapObject*>(prototype_transitions);
+}
+
+
+void TransitionArray::SetPrototypeTransitions(FixedArray* transitions,
+                                              WriteBarrierMode mode) {
+  ASSERT(this != NULL);
+  ASSERT(transitions->IsFixedArray());
+  Heap* heap = GetHeap();
+  WRITE_FIELD(this, kPrototypeTransitionsOffset, transitions);
+  CONDITIONAL_WRITE_BARRIER(
+      heap, this, kPrototypeTransitionsOffset, transitions, mode);
+}
+
+
+Object** TransitionArray::GetPrototypeTransitionsSlot() {
+  return HeapObject::RawField(reinterpret_cast<HeapObject*>(this),
+                              kPrototypeTransitionsOffset);
+}
+
+
 Object** TransitionArray::GetKeySlot(int transition_number) {
   ASSERT(transition_number < number_of_transitions());
   return HeapObject::RawField(
@@ -148,7 +184,7 @@
 }
 
 
-Object** TransitionArray::GetElementsSlot() {
+Object** TransitionArray::GetElementsTransitionSlot() {
   return HeapObject::RawField(reinterpret_cast<HeapObject*>(this),
                               kElementsTransitionOffset);
 }
diff --git a/src/transitions.cc b/src/transitions.cc
index 033f224..1e965cd 100644
--- a/src/transitions.cc
+++ b/src/transitions.cc
@@ -45,6 +45,7 @@
   }
 
   array->set(kElementsTransitionIndex, Smi::FromInt(0));
+  array->set(kPrototypeTransitionsIndex, Smi::FromInt(0));
   return array;
 }
 
@@ -98,6 +99,10 @@
     result->set_elements_transition(elements_transition());
   }
 
+  if (HasPrototypeTransitions()) {
+    result->SetPrototypeTransitions(GetPrototypeTransitions());
+  }
+
   FixedArray::WhitenessWitness witness(result);
 
   if (insertion_index != kNotFound) {
diff --git a/src/transitions.h b/src/transitions.h
index 5abdf99..a4b5b27 100644
--- a/src/transitions.h
+++ b/src/transitions.h
@@ -46,22 +46,33 @@
 // [length() - kTransitionSize] Last transition
 class TransitionArray: public FixedArray {
  public:
+  // Accessors for fetching instance transition at transition number.
+  inline String* GetKey(int transition_number);
+  inline void SetKey(int transition_number, String* value);
+  inline Object** GetKeySlot(int transition_number);
+
+  inline Object* GetValue(int transition_number);
+  inline void SetValue(int transition_number, Object* value);
+  inline Object** GetValueSlot(int transition_number);
+
+  inline Map* GetTargetMap(int transition_number);
+  inline PropertyDetails GetTargetDetails(int transition_number);
+
   inline Map* elements_transition();
   inline void set_elements_transition(
       Map* value,
       WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-  inline void ClearElementsTransition();
+  inline Object** GetElementsTransitionSlot();
   inline bool HasElementsTransition();
-  // Accessors for fetching instance transition at transition number.
-  inline String* GetKey(int transition_number);
-  inline Object** GetKeySlot(int transition_number);
-  inline void SetKey(int transition_number, String* value);
-  inline Object* GetValue(int transition_number);
-  inline Object** GetValueSlot(int transition_number);
-  inline void SetValue(int transition_number, Object* value);
-  inline Map* GetTargetMap(int transition_number);
-  inline PropertyDetails GetTargetDetails(int transition_number);
-  inline Object** GetElementsSlot();
+  inline void ClearElementsTransition();
+
+  inline FixedArray* GetPrototypeTransitions();
+  inline void SetPrototypeTransitions(
+      FixedArray* prototype_transitions,
+      WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+  inline Object** GetPrototypeTransitionsSlot();
+  inline bool HasPrototypeTransitions();
+  inline HeapObject* UncheckedPrototypeTransitions();
 
   // Returns the number of transitions in the array.
   int number_of_transitions() {
@@ -99,11 +110,14 @@
   static const int kNotFound = -1;
 
   static const int kElementsTransitionIndex = 0;
-  static const int kFirstIndex = 1;
+  static const int kPrototypeTransitionsIndex = 1;
+  static const int kFirstIndex = 2;
 
   // Layout transition array header.
   static const int kElementsTransitionOffset = FixedArray::kHeaderSize;
-  static const int kFirstOffset = kElementsTransitionOffset + kPointerSize;
+  static const int kPrototypeTransitionsOffset = kElementsTransitionOffset +
+                                                 kPointerSize;
+  static const int kFirstOffset = kPrototypeTransitionsOffset + kPointerSize;
 
   // Layout of map transition.
   static const int kTransitionKey = 0;
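
The new kPrototypeTransitionsIndex header slot shifts the first real transition
entry from index 1 to index 2. A small sketch of the resulting index
arithmetic; the header constants are the ones declared above, while
kTransitionSize and kTransitionTarget (two slots per transition: key, then
target map) are assumptions made only for this illustration:

  #include <cassert>

  const int kElementsTransitionIndex = 0;    // Smi 0 when absent
  const int kPrototypeTransitionsIndex = 1;  // Smi 0 when absent
  const int kFirstIndex = 2;                 // first real transition entry

  const int kTransitionSize = 2;    // assumed: key slot + target slot
  const int kTransitionKey = 0;
  const int kTransitionTarget = 1;  // assumed companion of kTransitionKey

  inline int KeyIndex(int transition_number) {
    return kFirstIndex + transition_number * kTransitionSize + kTransitionKey;
  }
  inline int TargetIndex(int transition_number) {
    return kFirstIndex + transition_number * kTransitionSize +
           kTransitionTarget;
  }

  int main() {
    // The header grew by one slot, so entries start right after it.
    assert(kFirstIndex == kPrototypeTransitionsIndex + 1);
    assert(kPrototypeTransitionsIndex == kElementsTransitionIndex + 1);
    // Transition 0 now starts at index 2 instead of index 1.
    assert(KeyIndex(0) == 2);
    assert(TargetIndex(0) == 3);
    assert(KeyIndex(3) == 8);
    return 0;
  }
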
diff --git a/src/version.cc b/src/version.cc
index 5aae850..87c152e 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     12
-#define BUILD_NUMBER      9
+#define BUILD_NUMBER      10
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)