Version 3.10.0

Fixed store IC writability check in strict mode (Chromium issue 120099).

Resynchronized timers if the Windows system time was changed (Chromium issue 119815).

Removed "-mfloat-abi=hard" from host compiler cflags when building for hardfp ARM (https://code.google.com/p/chrome-os-partner/issues/detail?id=8539).

Fixed edge case for case independent regexp character classes (issue 2032).

Reset function info counters after context disposal (Chromium issue 117767, V8 issue 1902).

Fixed missing write barrier in CopyObjectToObjectElements. (Chromium issue 119926)

Fixed missing bounds check in HasElementImpl. (Chromium issue 119925)

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@11192 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/SConscript b/src/SConscript
index fde7a80..0d0b535 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -101,6 +101,7 @@
     objects.cc
     objects-printer.cc
     objects-visiting.cc
+    once.cc
     parser.cc
     preparser.cc
     preparse-data.cc
diff --git a/src/api.cc b/src/api.cc
index 20e1f88..eab7c80 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -512,6 +512,16 @@
 }
 
 
+void RegisteredExtension::UnregisterAll() {
+  RegisteredExtension* re = first_extension_;
+  while (re != NULL) {
+    RegisteredExtension* next = re->next();
+    delete re;
+    re = next;
+  }
+}
+
+
 void RegisterExtension(Extension* that) {
   RegisteredExtension* extension = new RegisteredExtension(that);
   RegisteredExtension::Register(extension);
@@ -4170,7 +4180,7 @@
 
 
 bool v8::V8::Initialize() {
-  i::Isolate* isolate = i::Isolate::UncheckedCurrent();
+  i::Isolate* isolate = i::Isolate::Current();
   if (isolate != NULL && isolate->IsInitialized()) {
     return true;
   }
@@ -5063,7 +5073,7 @@
 
 
 Local<Integer> v8::Integer::New(int32_t value) {
-  i::Isolate* isolate = i::Isolate::UncheckedCurrent();
+  i::Isolate* isolate = i::Isolate::Current();
   EnsureInitializedForIsolate(isolate, "v8::Integer::New()");
   if (i::Smi::IsValid(value)) {
     return Utils::IntegerToLocal(i::Handle<i::Object>(i::Smi::FromInt(value),
@@ -5341,7 +5351,7 @@
 
 
 Isolate* Isolate::GetCurrent() {
-  i::Isolate* isolate = i::Isolate::UncheckedCurrent();
+  i::Isolate* isolate = i::Isolate::Current();
   return reinterpret_cast<Isolate*>(isolate);
 }
 
@@ -6019,7 +6029,7 @@
 }
 
 
-uint64_t HeapGraphNode::GetId() const {
+SnapshotObjectId HeapGraphNode::GetId() const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapGraphNode::GetId");
   return ToInternal(this)->id();
@@ -6134,11 +6144,11 @@
 }
 
 
-const HeapGraphNode* HeapSnapshot::GetNodeById(uint64_t id) const {
+const HeapGraphNode* HeapSnapshot::GetNodeById(SnapshotObjectId id) const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodeById");
   return reinterpret_cast<const HeapGraphNode*>(
-      ToInternal(this)->GetEntryById(static_cast<i::SnapshotObjectId>(id)));
+      ToInternal(this)->GetEntryById(id));
 }
 
 
@@ -6157,6 +6167,13 @@
 }
 
 
+SnapshotObjectId HeapSnapshot::GetMaxSnapshotJSObjectId() const {
+  i::Isolate* isolate = i::Isolate::Current();
+  IsDeadCheck(isolate, "v8::HeapSnapshot::GetMaxSnapshotJSObjectId");
+  return ToInternal(this)->max_snapshot_js_object_id();
+}
+
+
 void HeapSnapshot::Serialize(OutputStream* stream,
                              HeapSnapshot::SerializationFormat format) const {
   i::Isolate* isolate = i::Isolate::Current();
diff --git a/src/api.h b/src/api.h
index 89cf0c8..3ad57f4 100644
--- a/src/api.h
+++ b/src/api.h
@@ -146,6 +146,7 @@
  public:
   explicit RegisteredExtension(Extension* extension);
   static void Register(RegisteredExtension* that);
+  static void UnregisterAll();
   Extension* extension() { return extension_; }
   RegisteredExtension* next() { return next_; }
   RegisteredExtension* next_auto() { return next_auto_; }
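
UnregisterAll() above tears down the singly linked extension list with the
standard delete-while-iterating idiom: the next pointer must be saved before
the current node is freed. A minimal standalone sketch of the same pattern
(Node and DeleteAll are illustrative names, not V8 code):

    #include <cstddef>

    struct Node {
      Node* next;
    };

    // Save 'next' before 'delete': after the delete, reading head->next
    // would touch freed memory.
    void DeleteAll(Node* head) {
      while (head != NULL) {
        Node* next = head->next;
        delete head;
        head = next;
      }
    }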
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index a97f31d..ec28da4 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -237,20 +237,21 @@
 
 // add(sp, sp, 4) instruction (aka Pop())
 const Instr kPopInstruction =
-    al | PostIndex | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
+    al | PostIndex | 4 | LeaveCC | I | kRegister_sp_Code * B16 |
+        kRegister_sp_Code * B12;
 // str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
 // register r is not encoded.
 const Instr kPushRegPattern =
-    al | B26 | 4 | NegPreIndex | sp.code() * B16;
+    al | B26 | 4 | NegPreIndex | kRegister_sp_Code * B16;
 // ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
 // register r is not encoded.
 const Instr kPopRegPattern =
-    al | B26 | L | 4 | PostIndex | sp.code() * B16;
+    al | B26 | L | 4 | PostIndex | kRegister_sp_Code * B16;
 // mov lr, pc
-const Instr kMovLrPc = al | MOV | pc.code() | lr.code() * B12;
+const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12;
 // ldr rd, [pc, #offset]
 const Instr kLdrPCMask = kCondMask | 15 * B24 | 7 * B20 | 15 * B16;
-const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16;
+const Instr kLdrPCPattern = al | 5 * B24 | L | kRegister_pc_Code * B16;
 // blxcc rm
 const Instr kBlxRegMask =
     15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
@@ -273,13 +274,13 @@
 
 // A mask for the Rd register for push, pop, ldr, str instructions.
 const Instr kLdrRegFpOffsetPattern =
-    al | B26 | L | Offset | fp.code() * B16;
+    al | B26 | L | Offset | kRegister_fp_Code * B16;
 const Instr kStrRegFpOffsetPattern =
-    al | B26 | Offset | fp.code() * B16;
+    al | B26 | Offset | kRegister_fp_Code * B16;
 const Instr kLdrRegFpNegOffsetPattern =
-    al | B26 | L | NegOffset | fp.code() * B16;
+    al | B26 | L | NegOffset | kRegister_fp_Code * B16;
 const Instr kStrRegFpNegOffsetPattern =
-    al | B26 | NegOffset | fp.code() * B16;
+    al | B26 | NegOffset | kRegister_fp_Code * B16;
 const Instr kLdrStrInstrTypeMask = 0xffff0000;
 const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
 const Instr kLdrStrOffsetMask = 0x00000fff;
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index e98cd1d..e2d5f59 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -124,24 +124,47 @@
   int code_;
 };
 
-const Register no_reg = { -1 };
+// These constants are used in several locations, including static initializers
+const int kRegister_no_reg_Code = -1;
+const int kRegister_r0_Code = 0;
+const int kRegister_r1_Code = 1;
+const int kRegister_r2_Code = 2;
+const int kRegister_r3_Code = 3;
+const int kRegister_r4_Code = 4;
+const int kRegister_r5_Code = 5;
+const int kRegister_r6_Code = 6;
+const int kRegister_r7_Code = 7;
+const int kRegister_r8_Code = 8;
+const int kRegister_r9_Code = 9;
+const int kRegister_r10_Code = 10;
+const int kRegister_fp_Code = 11;
+const int kRegister_ip_Code = 12;
+const int kRegister_sp_Code = 13;
+const int kRegister_lr_Code = 14;
+const int kRegister_pc_Code = 15;
 
-const Register r0  = {  0 };
-const Register r1  = {  1 };
-const Register r2  = {  2 };
-const Register r3  = {  3 };
-const Register r4  = {  4 };
-const Register r5  = {  5 };
-const Register r6  = {  6 };
-const Register r7  = {  7 };
-const Register r8  = {  8 };  // Used as context register.
-const Register r9  = {  9 };  // Used as lithium codegen scratch register.
-const Register r10 = { 10 };  // Used as roots register.
-const Register fp  = { 11 };
-const Register ip  = { 12 };
-const Register sp  = { 13 };
-const Register lr  = { 14 };
-const Register pc  = { 15 };
+const Register no_reg = { kRegister_no_reg_Code };
+
+const Register r0  = { kRegister_r0_Code };
+const Register r1  = { kRegister_r1_Code };
+const Register r2  = { kRegister_r2_Code };
+const Register r3  = { kRegister_r3_Code };
+const Register r4  = { kRegister_r4_Code };
+const Register r5  = { kRegister_r5_Code };
+const Register r6  = { kRegister_r6_Code };
+const Register r7  = { kRegister_r7_Code };
+// Used as context register.
+const Register r8  = { kRegister_r8_Code };
+// Used as lithium codegen scratch register.
+const Register r9  = { kRegister_r9_Code };
+// Used as roots register.
+const Register r10 = { kRegister_r10_Code };
+const Register fp  = { kRegister_fp_Code };
+const Register ip  = { kRegister_ip_Code };
+const Register sp  = { kRegister_sp_Code };
+const Register lr  = { kRegister_lr_Code };
+const Register pc  = { kRegister_pc_Code };
+
 
 // Single word VFP register.
 struct SwVfpRegister {
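
Replacing sp.code() and friends with the kRegister_*_Code constants makes each
Instr definition in assembler-arm.cc an integral constant expression, so the
compiler folds it at compile time instead of emitting a runtime static
initializer. A compilable sketch of the difference, with illustrative names:

    struct Register {
      int code_;
      int code() const { return code_; }  // a call: not a C++03 constant
    };

    const Register sp = { 13 };
    const int kRegister_sp_Code = 13;

    // Needs a runtime static initializer (involves a function call):
    const int kFromMethod = sp.code() << 16;
    // Pure integral constant expression, folded at compile time:
    const int kFromConstant = kRegister_sp_Code << 16;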
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 250f020..f772db9 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -480,7 +480,7 @@
   __ b(gt, &not_special);
 
   // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
-  static const uint32_t exponent_word_for_1 =
+  const uint32_t exponent_word_for_1 =
       HeapNumber::kExponentBias << HeapNumber::kExponentShift;
   __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
   // 1, 0 and -1 all have 0 for the second word.
@@ -4237,7 +4237,7 @@
 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   // The displacement is the offset of the last parameter (if any)
   // relative to the frame pointer.
-  static const int kDisplacement =
+  const int kDisplacement =
       StandardFrameConstants::kCallerSPOffset - kPointerSize;
 
   // Check that the key is a smi.
@@ -4622,10 +4622,10 @@
   //  sp[8]: subject string
   //  sp[12]: JSRegExp object
 
-  static const int kLastMatchInfoOffset = 0 * kPointerSize;
-  static const int kPreviousIndexOffset = 1 * kPointerSize;
-  static const int kSubjectOffset = 2 * kPointerSize;
-  static const int kJSRegExpOffset = 3 * kPointerSize;
+  const int kLastMatchInfoOffset = 0 * kPointerSize;
+  const int kPreviousIndexOffset = 1 * kPointerSize;
+  const int kSubjectOffset = 2 * kPointerSize;
+  const int kJSRegExpOffset = 3 * kPointerSize;
 
   Label runtime, invoke_regexp;
 
@@ -4824,8 +4824,8 @@
   __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
 
   // Isolates: note we add an additional parameter here (isolate pointer).
-  static const int kRegExpExecuteArguments = 8;
-  static const int kParameterRegisters = 4;
+  const int kRegExpExecuteArguments = 8;
+  const int kParameterRegisters = 4;
   __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
 
   // Stack pointer now points to cell where return address is to be written.
@@ -5714,7 +5714,7 @@
   // scratch: -
 
   // Perform a number of probes in the symbol table.
-  static const int kProbes = 4;
+  const int kProbes = 4;
   Label found_in_symbol_table;
   Label next_probe[kProbes];
   Register candidate = scratch5;  // Scratch register contains candidate.
@@ -5839,9 +5839,9 @@
   //  0 <= from <= to <= string.length.
   // If any of these assumptions fail, we call the runtime system.
 
-  static const int kToOffset = 0 * kPointerSize;
-  static const int kFromOffset = 1 * kPointerSize;
-  static const int kStringOffset = 2 * kPointerSize;
+  const int kToOffset = 0 * kPointerSize;
+  const int kFromOffset = 1 * kPointerSize;
+  const int kStringOffset = 2 * kPointerSize;
 
   __ Ldrd(r2, r3, MemOperand(sp, kToOffset));
   STATIC_ASSERT(kFromOffset == kToOffset + 4);
@@ -7085,43 +7085,45 @@
   RememberedSetAction action;
 };
 
+#define REG(Name) { kRegister_ ## Name ## _Code }
 
-struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
   // Used in RegExpExecStub.
-  { r6, r4, r7, EMIT_REMEMBERED_SET },
-  { r6, r2, r7, EMIT_REMEMBERED_SET },
+  { REG(r6), REG(r4), REG(r7), EMIT_REMEMBERED_SET },
+  { REG(r6), REG(r2), REG(r7), EMIT_REMEMBERED_SET },
   // Used in CompileArrayPushCall.
   // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
   // Also used in KeyedStoreIC::GenerateGeneric.
-  { r3, r4, r5, EMIT_REMEMBERED_SET },
+  { REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET },
   // Used in CompileStoreGlobal.
-  { r4, r1, r2, OMIT_REMEMBERED_SET },
+  { REG(r4), REG(r1), REG(r2), OMIT_REMEMBERED_SET },
   // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
-  { r1, r2, r3, EMIT_REMEMBERED_SET },
-  { r3, r2, r1, EMIT_REMEMBERED_SET },
+  { REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET },
+  { REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET },
   // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
-  { r2, r1, r3, EMIT_REMEMBERED_SET },
-  { r3, r1, r2, EMIT_REMEMBERED_SET },
+  { REG(r2), REG(r1), REG(r3), EMIT_REMEMBERED_SET },
+  { REG(r3), REG(r1), REG(r2), EMIT_REMEMBERED_SET },
   // KeyedStoreStubCompiler::GenerateStoreFastElement.
-  { r3, r2, r4, EMIT_REMEMBERED_SET },
-  { r2, r3, r4, EMIT_REMEMBERED_SET },
+  { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET },
+  { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET },
   // ElementsTransitionGenerator::GenerateSmiOnlyToObject
   // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
   // and ElementsTransitionGenerator::GenerateDoubleToObject
-  { r2, r3, r9, EMIT_REMEMBERED_SET },
-  { r2, r3, r9, OMIT_REMEMBERED_SET },
+  { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET },
+  { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET },
   // ElementsTransitionGenerator::GenerateDoubleToObject
-  { r6, r2, r0, EMIT_REMEMBERED_SET },
-  { r2, r6, r9, EMIT_REMEMBERED_SET },
+  { REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET },
+  { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET },
   // StoreArrayLiteralElementStub::Generate
-  { r5, r0, r6, EMIT_REMEMBERED_SET },
+  { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
   // Null termination.
-  { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
+  { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
 };
 
+#undef REG
 
 bool RecordWriteStub::IsPregenerated() {
-  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
        !entry->object.is(no_reg);
        entry++) {
     if (object_.is(entry->object) &&
@@ -7148,7 +7150,7 @@
 
 
 void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
-  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
        !entry->object.is(no_reg);
        entry++) {
     RecordWriteStub stub(entry->object,
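
The REG macro pastes a register name into its code constant, so every entry of
kAheadOfTime is an aggregate of integral constants and the whole table can be
a const, statically initialized array. The expansion in miniature (Register
and kExample are illustrative):

    const int kRegister_r6_Code = 6;

    struct Register { int code_; };

    #define REG(Name) { kRegister_ ## Name ## _Code }

    // REG(r6) expands to { kRegister_r6_Code }, i.e. { 6 }:
    static const Register kExample[] = { REG(r6) };

    #undef REG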
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 0cbd46e..db0c3b4 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -110,13 +110,6 @@
 };
 
 
-// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
-int FullCodeGenerator::self_optimization_header_size() {
-  UNREACHABLE();
-  return 24;
-}
-
-
 // Generate code for a JS function.  On entry to the function the receiver
 // and arguments have been pushed on the stack left to right.  The actual
 // argument count matches the formal parameter count expected by the
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index cdc1947..ab23b11 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -1753,9 +1753,9 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
+LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  LInstruction* result = new(zone()) LCheckMap(value);
+  LInstruction* result = new(zone()) LCheckMaps(value);
   return AssignEnvironment(result);
 }
 
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 62cde6e..a919a1d 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -72,7 +72,7 @@
   V(CheckFunction)                              \
   V(CheckInstanceType)                          \
   V(CheckNonSmi)                                \
-  V(CheckMap)                                   \
+  V(CheckMaps)                                  \
   V(CheckPrototypeMaps)                         \
   V(CheckSmi)                                   \
   V(ClampDToUint8)                              \
@@ -1889,14 +1889,14 @@
 };
 
 
-class LCheckMap: public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
  public:
-  explicit LCheckMap(LOperand* value) {
+  explicit LCheckMaps(LOperand* value) {
     inputs_[0] = value;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
-  DECLARE_HYDROGEN_ACCESSOR(CheckMap)
+  DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
+  DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
 };
 
 
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 82b80a2..7a099cc 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -4338,14 +4338,22 @@
 }
 
 
-void LCodeGen::DoCheckMap(LCheckMap* instr) {
+void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
   Register scratch = scratch0();
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister());
   Register reg = ToRegister(input);
-  Handle<Map> map = instr->hydrogen()->map();
-  DoCheckMapCommon(reg, scratch, map, instr->hydrogen()->mode(),
-                   instr->environment());
+
+  Label success;
+  SmallMapList* map_set = instr->hydrogen()->map_set();
+  for (int i = 0; i < map_set->length() - 1; i++) {
+    Handle<Map> map = map_set->at(i);
+    __ CompareMap(reg, scratch, map, &success, REQUIRE_EXACT_MAP);
+    __ b(eq, &success);
+  }
+  Handle<Map> map = map_set->last();
+  DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment());
+  __ bind(&success);
 }
 
 
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index 10ff2dd..9fd4bdf 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -480,6 +480,44 @@
 }
 
 
+void RegExpMacroAssemblerARM::CheckCharacterInRange(
+    uc16 from,
+    uc16 to,
+    Label* on_in_range) {
+  __ sub(r0, current_character(), Operand(from));
+  __ cmp(r0, Operand(to - from));
+  BranchOrBacktrack(ls, on_in_range);  // Unsigned lower-or-same condition.
+}
+
+
+void RegExpMacroAssemblerARM::CheckCharacterNotInRange(
+    uc16 from,
+    uc16 to,
+    Label* on_not_in_range) {
+  __ sub(r0, current_character(), Operand(from));
+  __ cmp(r0, Operand(to - from));
+  BranchOrBacktrack(hi, on_not_in_range);  // Unsigned higher condition.
+}
+
+
+void RegExpMacroAssemblerARM::CheckBitInTable(
+    Handle<ByteArray> table,
+    Label* on_bit_set) {
+  __ mov(r0, Operand(table));
+  if (mode_ != ASCII || kTableMask != String::kMaxAsciiCharCode) {
+    __ and_(r1, current_character(), Operand(kTableSize - 1));
+    __ add(r1, r1, Operand(ByteArray::kHeaderSize - kHeapObjectTag));
+  } else {
+    __ add(r1,
+           current_character(),
+           Operand(ByteArray::kHeaderSize - kHeapObjectTag));
+  }
+  __ ldrb(r0, MemOperand(r0, r1));
+  __ cmp(r0, Operand(0));
+  BranchOrBacktrack(ne, on_bit_set);
+}
+
+
 bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
                                                          Label* on_no_match) {
   // Range checks (c in min..max) are generally implemented by an unsigned
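
Both new range checks cost one subtraction and one unsigned comparison: c lies
in [from, to] exactly when the wrapped difference c - from is unsigned-<=
to - from, which is what the ls and hi condition codes test. The same trick in
portable C++ (InRange is an illustrative name):

    #include <stdint.h>

    // Assumes from <= to. If c < from, the uint16_t subtraction wraps to a
    // large value and the unsigned comparison fails -- one compare covers
    // both bounds.
    bool InRange(uint16_t c, uint16_t from, uint16_t to) {
      return static_cast<uint16_t>(c - from) <=
             static_cast<uint16_t>(to - from);
    }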
diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h
index 5c8ed06..14f984f 100644
--- a/src/arm/regexp-macro-assembler-arm.h
+++ b/src/arm/regexp-macro-assembler-arm.h
@@ -79,6 +79,14 @@
                                               uc16 minus,
                                               uc16 mask,
                                               Label* on_not_equal);
+  virtual void CheckCharacterInRange(uc16 from,
+                                     uc16 to,
+                                     Label* on_in_range);
+  virtual void CheckCharacterNotInRange(uc16 from,
+                                        uc16 to,
+                                        Label* on_not_in_range);
+  virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
+
   // Checks whether the given offset from the current position is before
   // the end of the string.
   virtual void CheckPosition(int cp_offset, Label* on_outside_input);
diff --git a/src/assembler.cc b/src/assembler.cc
index 07509b5..4944202 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -45,6 +45,7 @@
 #include "ic.h"
 #include "isolate.h"
 #include "jsregexp.h"
+#include "lazy-instance.h"
 #include "platform.h"
 #include "regexp-macro-assembler.h"
 #include "regexp-stack.h"
@@ -84,15 +85,36 @@
 namespace v8 {
 namespace internal {
 
+// -----------------------------------------------------------------------------
+// Common double constants.
 
-const double DoubleConstant::min_int = kMinInt;
-const double DoubleConstant::one_half = 0.5;
-const double DoubleConstant::minus_zero = -0.0;
-const double DoubleConstant::uint8_max_value = 255;
-const double DoubleConstant::zero = 0.0;
-const double DoubleConstant::canonical_non_hole_nan = OS::nan_value();
-const double DoubleConstant::the_hole_nan = BitCast<double>(kHoleNanInt64);
-const double DoubleConstant::negative_infinity = -V8_INFINITY;
+struct DoubleConstant BASE_EMBEDDED {
+  double min_int;
+  double one_half;
+  double minus_zero;
+  double zero;
+  double uint8_max_value;
+  double negative_infinity;
+  double canonical_non_hole_nan;
+  double the_hole_nan;
+};
+
+struct InitializeDoubleConstants {
+  static void Construct(DoubleConstant* double_constants) {
+    double_constants->min_int = kMinInt;
+    double_constants->one_half = 0.5;
+    double_constants->minus_zero = -0.0;
+    double_constants->uint8_max_value = 255;
+    double_constants->zero = 0.0;
+    double_constants->canonical_non_hole_nan = OS::nan_value();
+    double_constants->the_hole_nan = BitCast<double>(kHoleNanInt64);
+    double_constants->negative_infinity = -V8_INFINITY;
+  }
+};
+
+static LazyInstance<DoubleConstant, InitializeDoubleConstants>::type
+    double_constants = LAZY_INSTANCE_INITIALIZER;
+
 const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
 
 // -----------------------------------------------------------------------------
@@ -937,49 +959,49 @@
 
 ExternalReference ExternalReference::address_of_min_int() {
   return ExternalReference(reinterpret_cast<void*>(
-      const_cast<double*>(&DoubleConstant::min_int)));
+      &double_constants.Pointer()->min_int));
 }
 
 
 ExternalReference ExternalReference::address_of_one_half() {
   return ExternalReference(reinterpret_cast<void*>(
-      const_cast<double*>(&DoubleConstant::one_half)));
+      &double_constants.Pointer()->one_half));
 }
 
 
 ExternalReference ExternalReference::address_of_minus_zero() {
   return ExternalReference(reinterpret_cast<void*>(
-      const_cast<double*>(&DoubleConstant::minus_zero)));
+      &double_constants.Pointer()->minus_zero));
 }
 
 
 ExternalReference ExternalReference::address_of_zero() {
   return ExternalReference(reinterpret_cast<void*>(
-      const_cast<double*>(&DoubleConstant::zero)));
+      &double_constants.Pointer()->zero));
 }
 
 
 ExternalReference ExternalReference::address_of_uint8_max_value() {
   return ExternalReference(reinterpret_cast<void*>(
-      const_cast<double*>(&DoubleConstant::uint8_max_value)));
+      &double_constants.Pointer()->uint8_max_value));
 }
 
 
 ExternalReference ExternalReference::address_of_negative_infinity() {
   return ExternalReference(reinterpret_cast<void*>(
-      const_cast<double*>(&DoubleConstant::negative_infinity)));
+      &double_constants.Pointer()->negative_infinity));
 }
 
 
 ExternalReference ExternalReference::address_of_canonical_non_hole_nan() {
   return ExternalReference(reinterpret_cast<void*>(
-      const_cast<double*>(&DoubleConstant::canonical_non_hole_nan)));
+      &double_constants.Pointer()->canonical_non_hole_nan));
 }
 
 
 ExternalReference ExternalReference::address_of_the_hole_nan() {
   return ExternalReference(reinterpret_cast<void*>(
-      const_cast<double*>(&DoubleConstant::the_hole_nan)));
+      &double_constants.Pointer()->the_hole_nan));
 }
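
The DoubleConstant statics removed above were dynamically initialized
(OS::nan_value() and BitCast are runtime calls); LazyInstance defers their
construction to the first Pointer() or Get() call. Roughly the shape of the
idea -- simplified, not V8's actual template, which lives in
src/lazy-instance.h and uses POD storage plus once-initialization rather than
a function-local static:

    struct DoubleConstants {
      double one_half;
      double minus_zero;
    };

    static DoubleConstants* GetDoubleConstants() {
      // Constructed on first call instead of at program startup.
      static DoubleConstants constants = { 0.5, -0.0 };
      return &constants;
    }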
 
 
diff --git a/src/assembler.h b/src/assembler.h
index 5063879..918a2a6 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -67,21 +67,6 @@
   int jit_cookie_;
 };
 
-// -----------------------------------------------------------------------------
-// Common double constants.
-
-class DoubleConstant: public AllStatic {
- public:
-  static const double min_int;
-  static const double one_half;
-  static const double minus_zero;
-  static const double zero;
-  static const double uint8_max_value;
-  static const double negative_infinity;
-  static const double canonical_non_hole_nan;
-  static const double the_hole_nan;
-};
-
 
 // -----------------------------------------------------------------------------
 // Labels represent pc locations; they are typically jump or call targets.
diff --git a/src/ast.h b/src/ast.h
index b827302..d6c47e2 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -270,6 +270,7 @@
 
   void Reserve(int capacity) { list_.Reserve(capacity); }
   void Clear() { list_.Clear(); }
+  void Sort() { list_.Sort(); }
 
   bool is_empty() const { return list_.is_empty(); }
   int length() const { return list_.length(); }
diff --git a/src/builtins.cc b/src/builtins.cc
index 1b84d84..0f493e6 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -1567,30 +1567,30 @@
   BuiltinExtraArguments extra_args;
 };
 
+#define BUILTIN_FUNCTION_TABLE_INIT { V8_ONCE_INIT, {} }
+
 class BuiltinFunctionTable {
  public:
-  BuiltinFunctionTable() {
-    Builtins::InitBuiltinFunctionTable();
+  BuiltinDesc* functions() {
+    CallOnce(&once_, &Builtins::InitBuiltinFunctionTable);
+    return functions_;
   }
 
-  static const BuiltinDesc* functions() { return functions_; }
-
- private:
-  static BuiltinDesc functions_[Builtins::builtin_count + 1];
+  OnceType once_;
+  BuiltinDesc functions_[Builtins::builtin_count + 1];
 
   friend class Builtins;
 };
 
-BuiltinDesc BuiltinFunctionTable::functions_[Builtins::builtin_count + 1];
-
-static const BuiltinFunctionTable builtin_function_table_init;
+static BuiltinFunctionTable builtin_function_table =
+    BUILTIN_FUNCTION_TABLE_INIT;
 
 // Define array of pointers to generators and C builtin functions.
 // We do this in a sort of roundabout way so that we can do the initialization
 // within the lexical scope of Builtins:: and within a context where
 // Code::Flags names a non-abstract type.
 void Builtins::InitBuiltinFunctionTable() {
-  BuiltinDesc* functions = BuiltinFunctionTable::functions_;
+  BuiltinDesc* functions = builtin_function_table.functions_;
   functions[builtin_count].generator = NULL;
   functions[builtin_count].c_code = NULL;
   functions[builtin_count].s_name = NULL;
@@ -1634,7 +1634,7 @@
   // Create a scope for the handles in the builtins.
   HandleScope scope(isolate);
 
-  const BuiltinDesc* functions = BuiltinFunctionTable::functions();
+  const BuiltinDesc* functions = builtin_function_table.functions();
 
   // For now we generate builtin adaptor code into a stack-allocated
   // buffer, before copying it into individual code objects. Be careful
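
BuiltinFunctionTable previously filled functions_ from a static constructor
(builtin_function_table_init); now the table is plain static data and the
first functions() call runs the initializer through CallOnce. A modern-C++
analogue of that shape, with std::call_once standing in for V8's
CallOnce/OnceType:

    #include <mutex>

    struct Table {
      std::once_flag once;
      int values[4];

      // Init runs exactly once, even if functions() is called concurrently;
      // every caller sees a fully initialized table.
      int* functions() {
        std::call_once(once, &Table::Init, this);
        return values;
      }

      void Init() {
        for (int i = 0; i < 4; i++) values[i] = i * i;
      }
    };

    static Table table;  // no constructor runs at startup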
diff --git a/src/bytecodes-irregexp.h b/src/bytecodes-irregexp.h
index b13efb3..c7cc66e 100644
--- a/src/bytecodes-irregexp.h
+++ b/src/bytecodes-irregexp.h
@@ -72,24 +72,23 @@
 V(AND_CHECK_CHAR,    28, 12)  /* bc8 pad8 uint16 uint32 addr32              */ \
 V(AND_CHECK_NOT_4_CHARS, 29, 16) /* bc8 pad24 uint32 uint32 addr32          */ \
 V(AND_CHECK_NOT_CHAR, 30, 12) /* bc8 pad8 uint16 uint32 addr32              */ \
-V(MINUS_AND_CHECK_NOT_CHAR, 31, 12) /* bc8 pad8 uc16 uc16 addr32            */ \
-V(CHECK_LT,          32, 8)   /* bc8 pad8 uc16 addr32                       */ \
-V(CHECK_GT,          33, 8)   /* bc8 pad8 uc16 addr32                       */ \
-V(CHECK_NOT_BACK_REF, 34, 8)  /* bc8 reg_idx24 addr32                       */ \
-V(CHECK_NOT_BACK_REF_NO_CASE, 35, 8) /* bc8 reg_idx24 addr32                */ \
-V(CHECK_NOT_REGS_EQUAL, 36, 12) /* bc8 regidx24 reg_idx32 addr32            */ \
-V(LOOKUP_MAP1,       37, 12)  /* bc8 pad8 start16 bit_map_addr32 addr32     */ \
-V(LOOKUP_MAP2,       38, 96)  /* bc8 pad8 start16 half_nibble_map_addr32*   */ \
-V(LOOKUP_MAP8,       39, 96)  /* bc8 pad8  start16 byte_map addr32*         */ \
-V(LOOKUP_HI_MAP8,    40, 96)  /* bc8 start24 byte_map_addr32 addr32*        */ \
-V(CHECK_REGISTER_LT, 41, 12)  /* bc8 reg_idx24 value32 addr32               */ \
-V(CHECK_REGISTER_GE, 42, 12)  /* bc8 reg_idx24 value32 addr32               */ \
-V(CHECK_REGISTER_EQ_POS, 43, 8) /* bc8 reg_idx24 addr32                     */ \
-V(CHECK_AT_START,    44, 8)   /* bc8 pad24 addr32                           */ \
-V(CHECK_NOT_AT_START, 45, 8)  /* bc8 pad24 addr32                           */ \
-V(CHECK_GREEDY,      46, 8)   /* bc8 pad24 addr32                           */ \
-V(ADVANCE_CP_AND_GOTO, 47, 8) /* bc8 offset24 addr32                        */ \
-V(SET_CURRENT_POSITION_FROM_END, 48, 4) /* bc8 idx24                        */
+V(MINUS_AND_CHECK_NOT_CHAR, 31, 12) /* bc8 pad8 uc16 uc16 uc16 addr32       */ \
+V(CHECK_CHAR_IN_RANGE, 32, 12) /* bc8 pad24 uc16 uc16 addr32                */ \
+V(CHECK_CHAR_NOT_IN_RANGE, 33, 12) /* bc8 pad24 uc16 uc16 addr32            */ \
+V(CHECK_BIT_IN_TABLE, 34, 24) /* bc8 pad24 addr32 bits128                   */ \
+V(CHECK_LT,          35, 8)   /* bc8 pad8 uc16 addr32                       */ \
+V(CHECK_GT,          36, 8)   /* bc8 pad8 uc16 addr32                       */ \
+V(CHECK_NOT_BACK_REF, 37, 8)  /* bc8 reg_idx24 addr32                       */ \
+V(CHECK_NOT_BACK_REF_NO_CASE, 38, 8) /* bc8 reg_idx24 addr32                */ \
+V(CHECK_NOT_REGS_EQUAL, 39, 12) /* bc8 regidx24 reg_idx32 addr32            */ \
+V(CHECK_REGISTER_LT, 40, 12)  /* bc8 reg_idx24 value32 addr32               */ \
+V(CHECK_REGISTER_GE, 41, 12)  /* bc8 reg_idx24 value32 addr32               */ \
+V(CHECK_REGISTER_EQ_POS, 42, 8) /* bc8 reg_idx24 addr32                     */ \
+V(CHECK_AT_START,    43, 8)   /* bc8 pad24 addr32                           */ \
+V(CHECK_NOT_AT_START, 44, 8)  /* bc8 pad24 addr32                           */ \
+V(CHECK_GREEDY,      45, 8)   /* bc8 pad24 addr32                           */ \
+V(ADVANCE_CP_AND_GOTO, 46, 8) /* bc8 offset24 addr32                        */ \
+V(SET_CURRENT_POSITION_FROM_END, 47, 4) /* bc8 idx24                        */
 
 #define DECLARE_BYTECODES(name, code, length) \
   static const int BC_##name = code;
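
The new CHECK_BIT_IN_TABLE bytecode carries its character class as a 128-bit
table (the bits128 operand): the low seven bits of the current character
select one bit, so membership costs a load and a mask. A sketch of that test
(BitInTable is an illustrative name; the bit layout is assumed from the
operand comment):

    #include <stdint.h>

    bool BitInTable(const uint8_t table[16], uint16_t c) {
      uint8_t index = c & 0x7f;  // low 7 bits select the bit
      return (table[index >> 3] >> (index & 7)) & 1;
    }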
diff --git a/src/compiler.cc b/src/compiler.cc
index 2272337..c9c2480 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -531,6 +531,10 @@
     if (extension == NULL && !result.is_null()) {
       compilation_cache->PutScript(source, result);
     }
+  } else {
+    if (result->ic_age() != HEAP->global_ic_age()) {
+      result->ResetForNewContext(HEAP->global_ic_age());
+    }
   }
 
   if (result.is_null()) isolate->ReportPendingMessages();
@@ -586,6 +590,10 @@
       compilation_cache->PutEval(
           source, context, is_global, result, scope_position);
     }
+  } else {
+    if (result->ic_age() != HEAP->global_ic_age()) {
+      result->ResetForNewContext(HEAP->global_ic_age());
+    }
   }
 
   return result;
diff --git a/src/d8.cc b/src/d8.cc
index 45781cf..1e8b4c8 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -426,14 +426,25 @@
   }
 
   Persistent<Object> persistent_array = Persistent<Object>::New(array);
-  persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
-  persistent_array.MarkIndependent();
   if (data == NULL && length != 0) {
-    data = calloc(length, element_size);
+    // Make sure the total size fits into a (signed) int.
+    static const int kMaxSize = 0x7fffffff;
+    if (length > (kMaxSize - sizeof(size_t)) / element_size) {
+      return ThrowException(String::New("Array exceeds maximum size (2G)"));
+    }
+    // Prepend the size of the allocated chunk to the data itself.
+    int total_size = length * element_size + sizeof(size_t);
+    data = malloc(total_size);
     if (data == NULL) {
       return ThrowException(String::New("Memory allocation failed."));
     }
+    *reinterpret_cast<size_t*>(data) = total_size;
+    data = reinterpret_cast<size_t*>(data) + 1;
+    memset(data, 0, length * element_size);
+    V8::AdjustAmountOfExternalAllocatedMemory(total_size);
   }
+  persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
+  persistent_array.MarkIndependent();
 
   array->SetIndexedPropertiesToExternalArrayData(
       reinterpret_cast<uint8_t*>(data) + offset, type,
@@ -452,6 +463,9 @@
   Handle<Object> converted_object = object->ToObject();
   Local<Value> prop_value = converted_object->Get(prop_name);
   if (data != NULL && !prop_value->IsObject()) {
+    data = reinterpret_cast<size_t*>(data) - 1;
+    V8::AdjustAmountOfExternalAllocatedMemory(
+        -static_cast<int>(*reinterpret_cast<size_t*>(data)));
     free(data);
   }
   object.Dispose();
@@ -977,8 +991,8 @@
     printf("+--------------------------------------------+-------------+\n");
     delete [] counters;
   }
-  if (counters_file_ != NULL)
-    delete counters_file_;
+  delete counters_file_;
+  delete counter_map_;
 }
 #endif  // V8_SHARED
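
The d8.cc change above swaps calloc() for a manual allocation that prepends
the chunk size, so ExternalArrayWeakCallback can later tell V8 exactly how
much external memory was released via
AdjustAmountOfExternalAllocatedMemory(). The allocate/free pair as a
standalone sketch (function names are illustrative):

    #include <cstdlib>
    #include <cstring>

    void* AllocateWithHeader(size_t length, size_t element_size) {
      size_t total_size = length * element_size + sizeof(size_t);
      void* data = malloc(total_size);
      if (data == NULL) return NULL;
      *reinterpret_cast<size_t*>(data) = total_size;  // size header
      data = reinterpret_cast<size_t*>(data) + 1;     // user-visible pointer
      memset(data, 0, length * element_size);
      return data;
    }

    size_t FreeWithHeader(void* data) {  // returns the size to report freed
      size_t* base = reinterpret_cast<size_t*>(data) - 1;
      size_t total_size = *base;
      free(base);
      return total_size;
    }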
 
diff --git a/src/d8.js b/src/d8.js
index bf26923..819135a 100644
--- a/src/d8.js
+++ b/src/d8.js
@@ -2174,7 +2174,7 @@
           }
 
           var current_line = from_line + num;
-          spacer = maxdigits - (1 + Math.floor(log10(current_line)));
+          var spacer = maxdigits - (1 + Math.floor(log10(current_line)));
           if (current_line == Debug.State.currentSourceLine + 1) {
             for (var i = 0; i < maxdigits; i++) {
               result += '>';
diff --git a/src/date.js b/src/date.js
index 75edf6d..d0e24ab 100644
--- a/src/date.js
+++ b/src/date.js
@@ -516,8 +516,7 @@
   var t = LOCAL_DATE_VALUE(this);
   ms = ToNumber(ms);
   var time = MakeTime(LOCAL_HOUR(this), LOCAL_MIN(this), LOCAL_SEC(this), ms);
-  SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
-  return this;
+  return SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
 }
 
 
diff --git a/src/debug.cc b/src/debug.cc
index 01f6f39..b6cdd7f 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -1847,13 +1847,6 @@
       // break slots.
       debug_break_slot_count++;
     }
-    if (frame_code->has_self_optimization_header() &&
-        !new_code->has_self_optimization_header()) {
-      delta -= FullCodeGenerator::self_optimization_header_size();
-    } else {
-      ASSERT(frame_code->has_self_optimization_header() ==
-             new_code->has_self_optimization_header());
-    }
     int debug_break_slot_bytes =
         debug_break_slot_count * Assembler::kDebugBreakSlotLength;
     if (FLAG_trace_deopt) {
diff --git a/src/elements.cc b/src/elements.cc
index 1d043a1..0fc3c53 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -1326,18 +1326,8 @@
 
 
 void ElementsAccessor::InitializeOncePerProcess() {
-  static struct ConcreteElementsAccessors {
-#define ACCESSOR_STRUCT(Class, Kind, Store) Class* Kind##_handler;
-    ELEMENTS_LIST(ACCESSOR_STRUCT)
-#undef ACCESSOR_STRUCT
-  } element_accessors = {
-#define ACCESSOR_INIT(Class, Kind, Store) new Class(#Kind),
-    ELEMENTS_LIST(ACCESSOR_INIT)
-#undef ACCESSOR_INIT
-  };
-
   static ElementsAccessor* accessor_array[] = {
-#define ACCESSOR_ARRAY(Class, Kind, Store) element_accessors.Kind##_handler,
+#define ACCESSOR_ARRAY(Class, Kind, Store) new Class(#Kind),
     ELEMENTS_LIST(ACCESSOR_ARRAY)
 #undef ACCESSOR_ARRAY
   };
@@ -1349,6 +1339,14 @@
 }
 
 
+void ElementsAccessor::TearDown() {
+#define ACCESSOR_DELETE(Class, Kind, Store) delete elements_accessors_[Kind];
+  ELEMENTS_LIST(ACCESSOR_DELETE)
+#undef ACCESSOR_DELETE
+  elements_accessors_ = NULL;
+}
+
+
 template <typename ElementsAccessorSubclass, typename ElementsKindTraits>
 MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
                                   ElementsKindTraits>::
diff --git a/src/elements.h b/src/elements.h
index ff97c08..51d402d 100644
--- a/src/elements.h
+++ b/src/elements.h
@@ -131,6 +131,7 @@
   static ElementsAccessor* ForArray(FixedArrayBase* array);
 
   static void InitializeOncePerProcess();
+  static void TearDown();
 
  protected:
   friend class NonStrictArgumentsElementsAccessor;
diff --git a/src/extensions/externalize-string-extension.cc b/src/extensions/externalize-string-extension.cc
index 9fbf329..50d8761 100644
--- a/src/extensions/externalize-string-extension.cc
+++ b/src/extensions/externalize-string-extension.cc
@@ -133,11 +133,8 @@
 
 
 void ExternalizeStringExtension::Register() {
-  static ExternalizeStringExtension* externalize_extension = NULL;
-  if (externalize_extension == NULL)
-    externalize_extension = new ExternalizeStringExtension;
-  static v8::DeclareExtension externalize_extension_declaration(
-      externalize_extension);
+  static ExternalizeStringExtension externalize_extension;
+  static v8::DeclareExtension declaration(&externalize_extension);
 }
 
 } }  // namespace v8::internal
diff --git a/src/extensions/gc-extension.cc b/src/extensions/gc-extension.cc
index 573797e..f921552 100644
--- a/src/extensions/gc-extension.cc
+++ b/src/extensions/gc-extension.cc
@@ -46,9 +46,8 @@
 
 
 void GCExtension::Register() {
-  static GCExtension* gc_extension = NULL;
-  if (gc_extension == NULL) gc_extension = new GCExtension();
-  static v8::DeclareExtension gc_extension_declaration(gc_extension);
+  static GCExtension gc_extension;
+  static v8::DeclareExtension declaration(&gc_extension);
 }
 
 } }  // namespace v8::internal
diff --git a/src/factory.cc b/src/factory.cc
index 143099c..e8a9f26 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -537,6 +537,10 @@
           : isolate()->strict_mode_function_map(),
       pretenure);
 
+  if (function_info->ic_age() != isolate()->heap()->global_ic_age()) {
+    function_info->ResetForNewContext(isolate()->heap()->global_ic_age());
+  }
+
   result->set_context(*context);
   if (!function_info->bound()) {
     int number_of_literals = function_info->num_literals();
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 85b3eaa..0668add 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -81,17 +81,41 @@
 
 #ifdef FLAG_MODE_DECLARE
 // Structure used to hold a collection of arguments to the JavaScript code.
+#define JSARGUMENTS_INIT {{}}
 struct JSArguments {
 public:
-  JSArguments();
-  JSArguments(int argc, const char** argv);
-  int argc() const;
-  const char** argv();
-  const char*& operator[](int idx);
-  JSArguments& operator=(JSArguments args);
+  inline int argc() const {
+    return static_cast<int>(storage_[0]);
+  }
+  inline const char** argv() const {
+    return reinterpret_cast<const char**>(storage_[1]);
+  }
+  inline const char*& operator[] (int idx) const {
+    return argv()[idx];
+  }
+  inline JSArguments& operator=(JSArguments args) {
+    set_argc(args.argc());
+    set_argv(args.argv());
+    return *this;
+  }
+  static JSArguments Create(int argc, const char** argv) {
+    JSArguments args;
+    args.set_argc(argc);
+    args.set_argv(argv);
+    return args;
+  }
 private:
-  int argc_;
-  const char** argv_;
+  void set_argc(int argc) {
+    storage_[0] = argc;
+  }
+  void set_argv(const char** argv) {
+    storage_[1] = reinterpret_cast<AtomicWord>(argv);
+  }
+public:
+  // Contains argc and argv. Unfortunately we have to store these two fields
+  // into a single one to avoid making the initialization macro (which would be
+  // "{ 0, NULL }") contain a comma.
+  AtomicWord storage_[2];
 };
 #endif
 
@@ -422,7 +446,7 @@
 #endif  // ENABLE_DEBUGGER_SUPPORT
 
 DEFINE_string(map_counters, "", "Map counters to a file")
-DEFINE_args(js_arguments, JSArguments(),
+DEFINE_args(js_arguments, JSARGUMENTS_INIT,
             "Pass all remaining arguments to the script. Alias for \"--\".")
 
 #if defined(WEBOS__)
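
JSArguments becomes a POD whose two logical fields share one array precisely
so that JSARGUMENTS_INIT can be written as {{}}: an initializer containing a
comma could not be passed through the DEFINE_args macro as a single argument.
The same trick in isolation (Args and ARGS_INIT are illustrative):

    #include <stdint.h>

    struct Args {
      intptr_t storage[2];  // [0] holds argc, [1] holds the argv pointer

      int argc() const { return static_cast<int>(storage[0]); }
      const char** argv() const {
        return reinterpret_cast<const char**>(storage[1]);
      }
    };

    // One member, one brace pair, no comma -- safe inside other macros.
    #define ARGS_INIT {{}}
    static Args args = ARGS_INIT;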
diff --git a/src/flags.cc b/src/flags.cc
index 75e66ce..5720cbd 100644
--- a/src/flags.cc
+++ b/src/flags.cc
@@ -411,7 +411,7 @@
           for (int k = i; k < *argc; k++) {
             js_argv[k - start_pos] = StrDup(argv[k]);
           }
-          *flag->args_variable() = JSArguments(js_argc, js_argv);
+          *flag->args_variable() = JSArguments::Create(js_argc, js_argv);
           i = *argc;  // Consume all arguments
           break;
         }
@@ -534,19 +534,6 @@
   }
 }
 
-JSArguments::JSArguments()
-    : argc_(0), argv_(NULL) {}
-JSArguments::JSArguments(int argc, const char** argv)
-    : argc_(argc), argv_(argv) {}
-int JSArguments::argc() const { return argc_; }
-const char** JSArguments::argv() { return argv_; }
-const char*& JSArguments::operator[](int idx) { return argv_[idx]; }
-JSArguments& JSArguments::operator=(JSArguments args) {
-    argc_ = args.argc_;
-    argv_ = args.argv_;
-    return *this;
-}
-
 
 void FlagList::EnforceFlagImplications() {
 #define FLAG_MODE_DEFINE_IMPLICATIONS
diff --git a/src/frames.cc b/src/frames.cc
index a6f1fd6..0571a81 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -31,6 +31,7 @@
 #include "deoptimizer.h"
 #include "frames-inl.h"
 #include "full-codegen.h"
+#include "lazy-instance.h"
 #include "mark-compact.h"
 #include "safepoint-table.h"
 #include "scopeinfo.h"
@@ -1380,12 +1381,12 @@
 };
 
 
-static const JSCallerSavedCodeData kCallerSavedCodeData;
-
+static LazyInstance<JSCallerSavedCodeData>::type caller_saved_code_data =
+    LAZY_INSTANCE_INITIALIZER;
 
 int JSCallerSavedCode(int n) {
   ASSERT(0 <= n && n < kNumJSCallerSaved);
-  return kCallerSavedCodeData.reg_code[n];
+  return caller_saved_code_data.Get().reg_code[n];
 }
 
 
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index d963979..449c5d2 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -315,7 +315,6 @@
   Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
   code->set_optimizable(info->IsOptimizable() &&
                         !info->function()->flags()->Contains(kDontOptimize));
-  code->set_self_optimization_header(cgen.has_self_optimization_header_);
   cgen.PopulateDeoptimizationData(code);
   cgen.PopulateTypeFeedbackInfo(code);
   cgen.PopulateTypeFeedbackCells(code);
@@ -327,12 +326,10 @@
   code->set_compiled_optimizable(info->IsOptimizable());
 #endif  // ENABLE_DEBUGGER_SUPPORT
   code->set_allow_osr_at_loop_nesting_level(0);
+  code->set_profiler_ticks(0);
   code->set_stack_check_table_offset(table_offset);
   CodeGenerator::PrintCode(code, info);
   info->SetCode(code);  // May be an empty handle.
-  if (!code.is_null()) {
-    isolate->runtime_profiler()->NotifyCodeGenerated(code->instruction_size());
-  }
 #ifdef ENABLE_GDB_JIT_INTERFACE
   if (FLAG_gdbjit && !code.is_null()) {
     GDBJITLineInfo* lineinfo =
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 58d5986..a308d83 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -90,15 +90,10 @@
         stack_checks_(2),  // There's always at least one.
         type_feedback_cells_(info->HasDeoptimizationSupport()
                              ? info->function()->ast_node_count() : 0),
-        ic_total_count_(0),
-        has_self_optimization_header_(false) { }
+        ic_total_count_(0) { }
 
   static bool MakeCode(CompilationInfo* info);
 
-  // Returns the platform-specific size in bytes of the self-optimization
-  // header.
-  static int self_optimization_header_size();
-
   // Encode state and pc-offset as a BitField<type, start, size>.
   // Only use 30 bits because we encode the result as a smi.
   class StateField : public BitField<State, 0, 1> { };
@@ -796,7 +791,6 @@
   ZoneList<BailoutEntry> stack_checks_;
   ZoneList<TypeFeedbackCellEntry> type_feedback_cells_;
   int ic_total_count_;
-  bool has_self_optimization_header_;
   Handle<FixedArray> handler_table_;
   Handle<JSGlobalPropertyCell> profiling_counter_;
 
diff --git a/src/gdb-jit.cc b/src/gdb-jit.cc
index 4192222..d3cd447 100644
--- a/src/gdb-jit.cc
+++ b/src/gdb-jit.cc
@@ -33,6 +33,7 @@
 #include "compiler.h"
 #include "global-handles.h"
 #include "messages.h"
+#include "platform.h"
 #include "natives.h"
 #include "scopeinfo.h"
 
@@ -2035,7 +2036,7 @@
 }
 
 
-Mutex* GDBJITInterface::mutex_ = OS::CreateMutex();
+static LazyMutex mutex = LAZY_MUTEX_INITIALIZER;
 
 
 void GDBJITInterface::AddCode(const char* name,
@@ -2045,7 +2046,7 @@
                               CompilationInfo* info) {
   if (!FLAG_gdbjit) return;
 
-  ScopedLock lock(mutex_);
+  ScopedLock lock(mutex.Pointer());
   AssertNoAllocation no_gc;
 
   HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
@@ -2126,7 +2127,7 @@
 void GDBJITInterface::RemoveCode(Code* code) {
   if (!FLAG_gdbjit) return;
 
-  ScopedLock lock(mutex_);
+  ScopedLock lock(mutex.Pointer());
   HashMap::Entry* e = GetEntries()->Lookup(code,
                                            HashForCodeObject(code),
                                            false);
@@ -2146,7 +2147,7 @@
 
 void GDBJITInterface::RegisterDetailedLineInfo(Code* code,
                                                GDBJITLineInfo* line_info) {
-  ScopedLock lock(mutex_);
+  ScopedLock lock(mutex.Pointer());
   ASSERT(!IsLineInfoTagged(line_info));
   HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
   ASSERT(e->value == NULL);
diff --git a/src/gdb-jit.h b/src/gdb-jit.h
index 2cf15bc..0eca938 100644
--- a/src/gdb-jit.h
+++ b/src/gdb-jit.h
@@ -132,9 +132,6 @@
   static void RemoveCode(Code* code);
 
   static void RegisterDetailedLineInfo(Code* code, GDBJITLineInfo* line_info);
-
- private:
-  static Mutex* mutex_;
 };
 
 #define GDBJIT(action) GDBJITInterface::action
diff --git a/src/heap.cc b/src/heap.cc
index 28cabc8..4ce1816 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -60,8 +60,7 @@
 namespace v8 {
 namespace internal {
 
-
-static Mutex* gc_initializer_mutex = OS::CreateMutex();
+static LazyMutex gc_initializer_mutex = LAZY_MUTEX_INITIALIZER;
 
 
 Heap::Heap()
@@ -1954,7 +1953,7 @@
     if (!maybe_info->To(&info)) return maybe_info;
   }
   info->set_ic_total_count(0);
-  info->set_ic_with_typeinfo_count(0);
+  info->set_ic_with_type_info_count(0);
   info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
                                 SKIP_WRITE_BARRIER);
   return info;
@@ -2898,9 +2897,9 @@
   share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
   share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
   share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
-  share->set_deopt_counter(FLAG_deopt_every_n_times);
-  share->set_profiler_ticks(0);
   share->set_ast_node_count(0);
+  share->set_deopt_counter(FLAG_deopt_every_n_times);
+  share->set_ic_age(0);
 
   // Set integer fields (smi or int, depending on the architecture).
   share->set_length(0);
@@ -5866,7 +5865,7 @@
     if (!ConfigureHeapDefault()) return false;
   }
 
-  gc_initializer_mutex->Lock();
+  gc_initializer_mutex.Pointer()->Lock();
   static bool initialized_gc = false;
   if (!initialized_gc) {
       initialized_gc = true;
@@ -5874,7 +5873,7 @@
       NewSpaceScavenger::Initialize();
       MarkCompactCollector::Initialize();
   }
-  gc_initializer_mutex->Unlock();
+  gc_initializer_mutex.Pointer()->Unlock();
 
   MarkMapPointersAsEncoded(false);
 
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index f698da4..f81f5f0 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -965,16 +965,13 @@
 }
 
 
-void HCheckMap::PrintDataTo(StringStream* stream) {
+void HCheckMaps::PrintDataTo(StringStream* stream) {
   value()->PrintNameTo(stream);
-  stream->Add(" %p", *map());
-  if (mode() == REQUIRE_EXACT_MAP) {
-    stream->Add(" [EXACT]");
-  } else if (!has_element_transitions_) {
-    stream->Add(" [EXACT*]");
-  } else {
-    stream->Add(" [MATCH ELEMENTS]");
+  stream->Add(" [%p", *map_set()->first());
+  for (int i = 1; i < map_set()->length(); ++i) {
+    stream->Add(",%p", *map_set()->at(i));
   }
+  stream->Add("]");
 }
 
 
@@ -1879,7 +1876,7 @@
 }
 
 
-HType HCheckMap::CalculateInferredType() {
+HType HCheckMaps::CalculateInferredType() {
   return value()->type();
 }
 
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index fb5879f..970f621 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -85,7 +85,7 @@
   V(Change)                                    \
   V(CheckFunction)                             \
   V(CheckInstanceType)                         \
-  V(CheckMap)                                  \
+  V(CheckMaps)                                 \
   V(CheckNonSmi)                               \
   V(CheckPrototypeMaps)                        \
   V(CheckSmi)                                  \
@@ -1998,14 +1998,9 @@
 };
 
 
-class HCheckMap: public HTemplateInstruction<2> {
+class HCheckMaps: public HTemplateInstruction<2> {
  public:
-  HCheckMap(HValue* value,
-            Handle<Map> map,
-            HValue* typecheck = NULL,
-            CompareMapMode mode = REQUIRE_EXACT_MAP)
-      : map_(map),
-        mode_(mode) {
+  HCheckMaps(HValue* value, Handle<Map> map, HValue* typecheck = NULL) {
     SetOperandAt(0, value);
     // If callers don't depend on a typecheck, they can pass in NULL. In that
     // case we use a copy of the |value| argument as a dummy value.
@@ -2013,14 +2008,49 @@
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
     SetGVNFlag(kDependsOnMaps);
-    // If the map to check doesn't have the untransitioned elements, it must not
-    // be hoisted above TransitionElements instructions.
-    if (mode == REQUIRE_EXACT_MAP || !map->has_fast_smi_only_elements()) {
-      SetGVNFlag(kDependsOnElementsKind);
+    SetGVNFlag(kDependsOnElementsKind);
+    map_set()->Add(map);
+  }
+  HCheckMaps(HValue* value, SmallMapList* maps) {
+    SetOperandAt(0, value);
+    SetOperandAt(1, value);
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetGVNFlag(kDependsOnMaps);
+    SetGVNFlag(kDependsOnElementsKind);
+    for (int i = 0; i < maps->length(); i++) {
+      map_set()->Add(maps->at(i));
     }
-    has_element_transitions_ =
-        map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL) != NULL ||
-        map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL) != NULL;
+    map_set()->Sort();
+  }
+
+  static HCheckMaps* NewWithTransitions(HValue* object, Handle<Map> map) {
+    HCheckMaps* check_map = new HCheckMaps(object, map);
+    SmallMapList* map_set = check_map->map_set();
+
+    // If the map to check has the untransitioned elements, it can be hoisted
+    // above TransitionElements instructions.
+    if (map->has_fast_smi_only_elements()) {
+      check_map->ClearGVNFlag(kDependsOnElementsKind);
+    }
+
+    Map* transitioned_fast_element_map =
+        map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL);
+    ASSERT(transitioned_fast_element_map == NULL ||
+           map->elements_kind() != FAST_ELEMENTS);
+    if (transitioned_fast_element_map != NULL) {
+      map_set->Add(Handle<Map>(transitioned_fast_element_map));
+    }
+    Map* transitioned_double_map =
+        map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL);
+    ASSERT(transitioned_double_map == NULL ||
+           map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
+    if (transitioned_double_map != NULL) {
+      map_set->Add(Handle<Map>(transitioned_double_map));
+    }
+    map_set->Sort();
+
+    return check_map;
   }
 
   virtual Representation RequiredInputRepresentation(int index) {
@@ -2030,25 +2060,23 @@
   virtual HType CalculateInferredType();
 
   HValue* value() { return OperandAt(0); }
-  Handle<Map> map() const { return map_; }
-  CompareMapMode mode() const { return mode_; }
+  SmallMapList* map_set() { return &map_set_; }
 
-  DECLARE_CONCRETE_INSTRUCTION(CheckMap)
+  DECLARE_CONCRETE_INSTRUCTION(CheckMaps)
 
  protected:
   virtual bool DataEquals(HValue* other) {
-    HCheckMap* b = HCheckMap::cast(other);
-    // Two CheckMaps instructions are DataEqual if their maps are identical and
-    // they have the same mode. The mode comparison can be ignored if the map
-    // has no elements transitions.
-    return map_.is_identical_to(b->map()) &&
-        (b->mode() == mode() || !has_element_transitions_);
+    HCheckMaps* b = HCheckMaps::cast(other);
+    // Relies on the fact that map_set has been sorted before.
+    if (map_set()->length() != b->map_set()->length()) return false;
+    for (int i = 0; i < map_set()->length(); i++) {
+      if (!map_set()->at(i).is_identical_to(b->map_set()->at(i))) return false;
+    }
+    return true;
   }
 
  private:
-  bool has_element_transitions_;
-  Handle<Map> map_;
-  CompareMapMode mode_;
+  SmallMapList map_set_;
 };
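
Sorting map_set_ at construction is what makes the DataEquals above both
correct and cheap: two lists sorted the same way describe the same set exactly
when they match elementwise, so GVN's equality check is a single linear scan.
The underlying property in miniature (SetEquals is an illustrative name):

    #include <algorithm>
    #include <vector>

    // Sort once, compare elementwise: after sorting, elementwise equality
    // decides set equality in O(n) per comparison.
    bool SetEquals(std::vector<const void*> a, std::vector<const void*> b) {
      if (a.size() != b.size()) return false;
      std::sort(a.begin(), a.end());
      std::sort(b.begin(), b.end());
      return std::equal(a.begin(), a.end(), b.begin());
    }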
 
 
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 9b77408..8c55a32 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -3910,21 +3910,22 @@
 }
 
 
-// Sets the lookup result and returns true if the store can be inlined.
-static bool ComputeStoredField(Handle<Map> type,
-                               Handle<String> name,
-                               LookupResult* lookup) {
+// Sets the lookup result and returns true if the load/store can be inlined.
+static bool ComputeLoadStoreField(Handle<Map> type,
+                                  Handle<String> name,
+                                  LookupResult* lookup,
+                                  bool is_store) {
   type->LookupInDescriptors(NULL, *name, lookup);
   if (!lookup->IsFound()) return false;
   if (lookup->type() == FIELD) return true;
-  return (lookup->type() == MAP_TRANSITION) &&
+  return is_store && (lookup->type() == MAP_TRANSITION) &&
       (type->unused_property_fields() > 0);
 }
 
 
-static int ComputeStoredFieldIndex(Handle<Map> type,
-                                   Handle<String> name,
-                                   LookupResult* lookup) {
+static int ComputeLoadStoreFieldIndex(Handle<Map> type,
+                                      Handle<String> name,
+                                      LookupResult* lookup) {
   ASSERT(lookup->type() == FIELD || lookup->type() == MAP_TRANSITION);
   if (lookup->type() == FIELD) {
     return lookup->GetLocalFieldIndexFromMap(*type);
@@ -3943,11 +3944,10 @@
                                                   bool smi_and_map_check) {
   if (smi_and_map_check) {
     AddInstruction(new(zone()) HCheckNonSmi(object));
-    AddInstruction(new(zone()) HCheckMap(object, type, NULL,
-                                         ALLOW_ELEMENT_TRANSITION_MAPS));
+    AddInstruction(HCheckMaps::NewWithTransitions(object, type));
   }
 
-  int index = ComputeStoredFieldIndex(type, name, lookup);
+  int index = ComputeLoadStoreFieldIndex(type, name, lookup);
   bool is_in_object = index < 0;
   int offset = index * kPointerSize;
   if (index < 0) {
@@ -3993,7 +3993,7 @@
   LookupResult lookup(isolate());
   Handle<Map> type = prop->GetReceiverType();
   bool is_monomorphic = prop->IsMonomorphic() &&
-      ComputeStoredField(type, name, &lookup);
+      ComputeLoadStoreField(type, name, &lookup, true);
 
   return is_monomorphic
       ? BuildStoreNamedField(object, name, value, type, &lookup,
@@ -4015,7 +4015,7 @@
   LookupResult lookup(isolate());
   SmallMapList* types = expr->GetReceiverTypes();
   bool is_monomorphic = expr->IsMonomorphic() &&
-      ComputeStoredField(types->first(), name, &lookup);
+      ComputeLoadStoreField(types->first(), name, &lookup, true);
 
   return is_monomorphic
       ? BuildStoreNamedField(object, name, value, types->first(), &lookup,
@@ -4024,6 +4024,59 @@
 }
 
 
+void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
+                                                    HValue* object,
+                                                    SmallMapList* types,
+                                                    Handle<String> name) {
+  int count = 0;
+  int previous_field_offset = 0;
+  bool previous_field_is_in_object = false;
+  bool is_monomorphic_field = true;
+  Handle<Map> map;
+  LookupResult lookup(isolate());
+  for (int i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
+    map = types->at(i);
+    if (ComputeLoadStoreField(map, name, &lookup, false)) {
+      int index = ComputeLoadStoreFieldIndex(map, name, &lookup);
+      bool is_in_object = index < 0;
+      int offset = index * kPointerSize;
+      if (index < 0) {
+        // Negative property indices are in-object properties, indexed
+        // from the end of the fixed part of the object.
+        offset += map->instance_size();
+      } else {
+        offset += FixedArray::kHeaderSize;
+      }
+      if (count == 0) {
+        previous_field_offset = offset;
+        previous_field_is_in_object = is_in_object;
+      } else if (is_monomorphic_field) {
+        is_monomorphic_field = (offset == previous_field_offset) &&
+                               (is_in_object == previous_field_is_in_object);
+      }
+      ++count;
+    }
+  }
+
+  // Use monomorphic load if property lookup results in the same field index
+  // for all maps.  Requires special map check on the set of all handled maps.
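+  // For example, if every map places the property at the same in-object
+  // offset, one HCheckMaps over the whole set followed by a single
+  // monomorphic load suffices; otherwise the generic
+  // HLoadNamedFieldPolymorphic instruction is used.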
+  HInstruction* instr;
+  if (count == types->length() && is_monomorphic_field) {
+    AddInstruction(new(zone()) HCheckMaps(object, types));
+    instr = BuildLoadNamedField(object, expr, map, &lookup, false);
+  } else {
+    HValue* context = environment()->LookupContext();
+    instr = new(zone()) HLoadNamedFieldPolymorphic(context,
+                                                   object,
+                                                   types,
+                                                   name);
+  }
+
+  instr->set_position(expr->position());
+  return ast_context()->ReturnInstruction(instr, expr->id());
+}
+
+
 void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
                                                      HValue* object,
                                                      HValue* value,
@@ -4037,7 +4090,7 @@
   for (int i = 0; i < types->length() && count < kMaxStorePolymorphism; ++i) {
     Handle<Map> map = types->at(i);
     LookupResult lookup(isolate());
-    if (ComputeStoredField(map, name, &lookup)) {
+    if (ComputeLoadStoreField(map, name, &lookup, true)) {
       if (count == 0) {
         AddInstruction(new(zone()) HCheckNonSmi(object));  // Only needed once.
         join = graph()->CreateBasicBlock();
@@ -4508,8 +4561,7 @@
                                                     bool smi_and_map_check) {
   if (smi_and_map_check) {
     AddInstruction(new(zone()) HCheckNonSmi(object));
-    AddInstruction(new(zone()) HCheckMap(object, type, NULL,
-                                         ALLOW_ELEMENT_TRANSITION_MAPS));
+    AddInstruction(HCheckMaps::NewWithTransitions(object, type));
   }
 
   int index = lookup->GetLocalFieldIndexFromMap(*type);
@@ -4553,8 +4605,7 @@
                                true);
   } else if (lookup.IsFound() && lookup.type() == CONSTANT_FUNCTION) {
     AddInstruction(new(zone()) HCheckNonSmi(obj));
-    AddInstruction(new(zone()) HCheckMap(obj, map, NULL,
-                                         ALLOW_ELEMENT_TRANSITION_MAPS));
+    AddInstruction(HCheckMaps::NewWithTransitions(obj, map));
     Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*map));
     return new(zone()) HConstant(function, Representation::Tagged());
   } else {
@@ -4656,12 +4707,12 @@
                                                            HValue* val,
                                                            Handle<Map> map,
                                                            bool is_store) {
-  HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMap(object, map));
+  HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMaps(object, map));
   bool fast_smi_only_elements = map->has_fast_smi_only_elements();
   bool fast_elements = map->has_fast_elements();
   HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object));
   if (is_store && (fast_elements || fast_smi_only_elements)) {
-    AddInstruction(new(zone()) HCheckMap(
+    AddInstruction(new(zone()) HCheckMaps(
         elements, isolate()->factory()->fixed_array_map()));
   }
   HInstruction* length = NULL;
@@ -4811,7 +4862,7 @@
           elements_kind == FAST_ELEMENTS ||
           elements_kind == FAST_DOUBLE_ELEMENTS) {
         if (is_store && elements_kind != FAST_DOUBLE_ELEMENTS) {
-          AddInstruction(new(zone()) HCheckMap(
+          AddInstruction(new(zone()) HCheckMaps(
               elements, isolate()->factory()->fixed_array_map(),
               elements_kind_branch));
         }
@@ -5015,8 +5066,8 @@
       instr = BuildLoadNamed(obj, expr, types->first(), name);
     } else if (types != NULL && types->length() > 1) {
       AddInstruction(new(zone()) HCheckNonSmi(obj));
-      HValue* context = environment()->LookupContext();
-      instr = new(zone()) HLoadNamedFieldPolymorphic(context, obj, types, name);
+      HandlePolymorphicLoadNamedField(expr, obj, types, name);
+      return;
     } else {
       instr = BuildLoadNamedGeneric(obj, expr);
     }
@@ -5057,8 +5108,7 @@
   // its prototypes.
   if (smi_and_map_check) {
     AddInstruction(new(zone()) HCheckNonSmi(receiver));
-    AddInstruction(new(zone()) HCheckMap(receiver, receiver_map, NULL,
-                                         ALLOW_ELEMENT_TRANSITION_MAPS));
+    AddInstruction(HCheckMaps::NewWithTransitions(receiver, receiver_map));
   }
   if (!expr->holder().is_null()) {
     AddInstruction(new(zone()) HCheckPrototypeMaps(
@@ -6869,11 +6919,9 @@
         Handle<Map> map = oracle()->GetCompareMap(expr);
         if (!map.is_null()) {
           AddInstruction(new(zone()) HCheckNonSmi(left));
-          AddInstruction(new(zone()) HCheckMap(left, map, NULL,
-                                               ALLOW_ELEMENT_TRANSITION_MAPS));
+          AddInstruction(HCheckMaps::NewWithTransitions(left, map));
           AddInstruction(new(zone()) HCheckNonSmi(right));
-          AddInstruction(new(zone()) HCheckMap(right, map, NULL,
-                                               ALLOW_ELEMENT_TRANSITION_MAPS));
+          AddInstruction(HCheckMaps::NewWithTransitions(right, map));
           HCompareObjectEqAndBranch* result =
               new(zone()) HCompareObjectEqAndBranch(left, right);
           result->set_position(expr->position());
diff --git a/src/hydrogen.h b/src/hydrogen.h
index e2779bb..bc9bc9d 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -1029,6 +1029,10 @@
 
   void HandlePropertyAssignment(Assignment* expr);
   void HandleCompoundAssignment(Assignment* expr);
+  void HandlePolymorphicLoadNamedField(Property* expr,
+                                       HValue* object,
+                                       SmallMapList* types,
+                                       Handle<String> name);
   void HandlePolymorphicStoreNamedField(Assignment* expr,
                                         HValue* object,
                                         HValue* value,
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 9adc78e..929b485 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -97,16 +97,25 @@
   int code_;
 };
 
+const int kRegister_eax_Code = 0;
+const int kRegister_ecx_Code = 1;
+const int kRegister_edx_Code = 2;
+const int kRegister_ebx_Code = 3;
+const int kRegister_esp_Code = 4;
+const int kRegister_ebp_Code = 5;
+const int kRegister_esi_Code = 6;
+const int kRegister_edi_Code = 7;
+const int kRegister_no_reg_Code = -1;
 
-const Register eax = { 0 };
-const Register ecx = { 1 };
-const Register edx = { 2 };
-const Register ebx = { 3 };
-const Register esp = { 4 };
-const Register ebp = { 5 };
-const Register esi = { 6 };
-const Register edi = { 7 };
-const Register no_reg = { -1 };
+const Register eax = { kRegister_eax_Code };
+const Register ecx = { kRegister_ecx_Code };
+const Register edx = { kRegister_edx_Code };
+const Register ebx = { kRegister_ebx_Code };
+const Register esp = { kRegister_esp_Code };
+const Register ebp = { kRegister_ebp_Code };
+const Register esi = { kRegister_esi_Code };
+const Register edi = { kRegister_edi_Code };
+const Register no_reg = { kRegister_no_reg_Code };
 
 
 inline const char* Register::AllocationIndexToString(int index) {
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index d3e2a91..4faa6a4 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -7024,44 +7024,47 @@
 };
 
 
-struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+#define REG(Name) { kRegister_ ## Name ## _Code }
+
+static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
   // Used in RegExpExecStub.
-  { ebx, eax, edi, EMIT_REMEMBERED_SET },
+  { REG(ebx), REG(eax), REG(edi), EMIT_REMEMBERED_SET },
   // Used in CompileArrayPushCall.
-  { ebx, ecx, edx, EMIT_REMEMBERED_SET },
-  { ebx, edi, edx, OMIT_REMEMBERED_SET },
+  { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
+  { REG(ebx), REG(edi), REG(edx), OMIT_REMEMBERED_SET },
   // Used in CompileStoreGlobal and CallFunctionStub.
-  { ebx, ecx, edx, OMIT_REMEMBERED_SET },
+  { REG(ebx), REG(ecx), REG(edx), OMIT_REMEMBERED_SET },
   // Used in StoreStubCompiler::CompileStoreField and
   // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
-  { edx, ecx, ebx, EMIT_REMEMBERED_SET },
+  { REG(edx), REG(ecx), REG(ebx), EMIT_REMEMBERED_SET },
   // GenerateStoreField calls the stub with two different permutations of
   // registers.  This is the second.
-  { ebx, ecx, edx, EMIT_REMEMBERED_SET },
+  { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
   // StoreIC::GenerateNormal via GenerateDictionaryStore
-  { ebx, edi, edx, EMIT_REMEMBERED_SET },
+  { REG(ebx), REG(edi), REG(edx), EMIT_REMEMBERED_SET },
   // KeyedStoreIC::GenerateGeneric.
-  { ebx, edx, ecx, EMIT_REMEMBERED_SET},
+  { REG(ebx), REG(edx), REG(ecx), EMIT_REMEMBERED_SET},
   // KeyedStoreStubCompiler::GenerateStoreFastElement.
-  { edi, ebx, ecx, EMIT_REMEMBERED_SET},
-  { edx, edi, ebx, EMIT_REMEMBERED_SET},
+  { REG(edi), REG(ebx), REG(ecx), EMIT_REMEMBERED_SET},
+  { REG(edx), REG(edi), REG(ebx), EMIT_REMEMBERED_SET},
   // ElementsTransitionGenerator::GenerateSmiOnlyToObject
   // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
   // and ElementsTransitionGenerator::GenerateDoubleToObject
-  { edx, ebx, edi, EMIT_REMEMBERED_SET},
-  { edx, ebx, edi, OMIT_REMEMBERED_SET},
+  { REG(edx), REG(ebx), REG(edi), EMIT_REMEMBERED_SET},
+  { REG(edx), REG(ebx), REG(edi), OMIT_REMEMBERED_SET},
   // ElementsTransitionGenerator::GenerateDoubleToObject
-  { eax, edx, esi, EMIT_REMEMBERED_SET},
-  { edx, eax, edi, EMIT_REMEMBERED_SET},
+  { REG(eax), REG(edx), REG(esi), EMIT_REMEMBERED_SET},
+  { REG(edx), REG(eax), REG(edi), EMIT_REMEMBERED_SET},
   // StoreArrayLiteralElementStub::Generate
-  { ebx, eax, ecx, EMIT_REMEMBERED_SET},
+  { REG(ebx), REG(eax), REG(ecx), EMIT_REMEMBERED_SET},
   // Null termination.
-  { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
+  { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
 };
 
+#undef REG
 
 bool RecordWriteStub::IsPregenerated() {
-  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
        !entry->object.is(no_reg);
        entry++) {
     if (object_.is(entry->object) &&
@@ -7089,7 +7092,7 @@
 
 
 void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
-  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
        !entry->object.is(no_reg);
        entry++) {
     RecordWriteStub stub(entry->object,
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index 92d7cc1..3f10c09 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -239,13 +239,13 @@
   // ok:
 
   if (FLAG_count_based_interrupts) {
-    ASSERT_EQ(*(call_target_address - 3), kJnsInstruction);
-    ASSERT_EQ(*(call_target_address - 2), kJnsOffset);
+    ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
+    ASSERT_EQ(kJnsOffset,      *(call_target_address - 2));
   } else {
-    ASSERT_EQ(*(call_target_address - 3), kJaeInstruction);
-    ASSERT_EQ(*(call_target_address - 2), kJaeOffset);
+    ASSERT_EQ(kJaeInstruction, *(call_target_address - 3));
+    ASSERT_EQ(kJaeOffset,      *(call_target_address - 2));
   }
-  ASSERT_EQ(*(call_target_address - 1), kCallInstruction);
+  ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
   *(call_target_address - 3) = kNopByteOne;
   *(call_target_address - 2) = kNopByteTwo;
   Assembler::set_target_address_at(call_target_address,
@@ -266,9 +266,9 @@
 
   // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
   // restore the conditional branch.
-  ASSERT_EQ(*(call_target_address - 3), kNopByteOne);
-  ASSERT_EQ(*(call_target_address - 2), kNopByteTwo);
-  ASSERT_EQ(*(call_target_address - 1), kCallInstruction);
+  ASSERT_EQ(kNopByteOne,      *(call_target_address - 3));
+  ASSERT_EQ(kNopByteTwo,      *(call_target_address - 2));
+  ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
   if (FLAG_count_based_interrupts) {
     *(call_target_address - 3) = kJnsInstruction;
     *(call_target_address - 2) = kJnsOffset;
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 62a2c2a..376671d 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -101,13 +101,6 @@
 };
 
 
-// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
-int FullCodeGenerator::self_optimization_header_size() {
-  UNREACHABLE();
-  return 13;
-}
-
-
 // Generate code for a JS function.  On entry to the function the receiver
 // and arguments have been pushed on the stack left to right, with the
 // return address on top of them.  The actual argument count matches the
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 8fb4c79..67e2dca 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -4181,12 +4181,21 @@
 }
 
 
-void LCodeGen::DoCheckMap(LCheckMap* instr) {
+void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister());
   Register reg = ToRegister(input);
-  Handle<Map> map = instr->hydrogen()->map();
-  DoCheckMapCommon(reg, map, instr->hydrogen()->mode(), instr->environment());
+
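+  // Compare the receiver against every map in the set; a match on any map
+  // jumps to success, and only a miss on the last map deoptimizes.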
+  Label success;
+  SmallMapList* map_set = instr->hydrogen()->map_set();
+  for (int i = 0; i < map_set->length() - 1; i++) {
+    Handle<Map> map = map_set->at(i);
+    __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP);
+    __ j(equal, &success);
+  }
+  Handle<Map> map = map_set->last();
+  DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment());
+  __ bind(&success);
 }
 
 
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 2bfbb67..a408bdf 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -1800,9 +1800,9 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
+LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  LCheckMap* result = new(zone()) LCheckMap(value);
+  LCheckMaps* result = new(zone()) LCheckMaps(value);
   return AssignEnvironment(result);
 }
 
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 4ecce96..0cfed12 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -65,7 +65,7 @@
   V(CallStub)                                   \
   V(CheckFunction)                              \
   V(CheckInstanceType)                          \
-  V(CheckMap)                                   \
+  V(CheckMaps)                                  \
   V(CheckNonSmi)                                \
   V(CheckPrototypeMaps)                         \
   V(CheckSmi)                                   \
@@ -1949,14 +1949,14 @@
 };
 
 
-class LCheckMap: public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
  public:
-  explicit LCheckMap(LOperand* value) {
+  explicit LCheckMaps(LOperand* value) {
     inputs_[0] = value;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
-  DECLARE_HYDROGEN_ACCESSOR(CheckMap)
+  DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
+  DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
 };
 
 
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index 04d6b62..747b79a 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -501,9 +501,13 @@
 void RegExpMacroAssemblerIA32::CheckCharacterAfterAnd(uint32_t c,
                                                       uint32_t mask,
                                                       Label* on_equal) {
-  __ mov(eax, current_character());
-  __ and_(eax, mask);
-  __ cmp(eax, c);
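+  // When the expected value after masking is zero, a single test instruction
+  // replaces the mov/and/cmp sequence.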
+  if (c == 0) {
+    __ test(current_character(), Immediate(mask));
+  } else {
+    __ mov(eax, current_character());
+    __ and_(eax, mask);
+    __ cmp(eax, c);
+  }
   BranchOrBacktrack(equal, on_equal);
 }
 
@@ -511,9 +515,13 @@
 void RegExpMacroAssemblerIA32::CheckNotCharacterAfterAnd(uint32_t c,
                                                          uint32_t mask,
                                                          Label* on_not_equal) {
-  __ mov(eax, current_character());
-  __ and_(eax, mask);
-  __ cmp(eax, c);
+  if (c == 0) {
+    __ test(current_character(), Immediate(mask));
+  } else {
+    __ mov(eax, current_character());
+    __ and_(eax, mask);
+    __ cmp(eax, c);
+  }
   BranchOrBacktrack(not_equal, on_not_equal);
 }
 
@@ -525,12 +533,51 @@
     Label* on_not_equal) {
   ASSERT(minus < String::kMaxUtf16CodeUnit);
   __ lea(eax, Operand(current_character(), -minus));
-  __ and_(eax, mask);
-  __ cmp(eax, c);
+  if (c == 0) {
+    __ test(eax, Immediate(mask));
+  } else {
+    __ and_(eax, mask);
+    __ cmp(eax, c);
+  }
   BranchOrBacktrack(not_equal, on_not_equal);
 }
 
 
+void RegExpMacroAssemblerIA32::CheckCharacterInRange(
+    uc16 from,
+    uc16 to,
+    Label* on_in_range) {
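+  // Unsigned-compare trick: after subtracting 'from', the result is
+  // <= (to - from) exactly when from <= current character <= to; the same
+  // trick is used by CheckCharacterNotInRange below.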
+  __ lea(eax, Operand(current_character(), -from));
+  __ cmp(eax, to - from);
+  BranchOrBacktrack(below_equal, on_in_range);
+}
+
+
+void RegExpMacroAssemblerIA32::CheckCharacterNotInRange(
+    uc16 from,
+    uc16 to,
+    Label* on_not_in_range) {
+  __ lea(eax, Operand(current_character(), -from));
+  __ cmp(eax, to - from);
+  BranchOrBacktrack(above, on_not_in_range);
+}
+
+
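+// Branches to on_bit_set if the table byte indexed by the current character
+// is non-zero.  The index is masked to the table size unless ASCII mode
+// guarantees the character already fits in the table.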
+void RegExpMacroAssemblerIA32::CheckBitInTable(
+    Handle<ByteArray> table,
+    Label* on_bit_set) {
+  __ mov(eax, Immediate(table));
+  Register index = current_character();
+  if (mode_ != ASCII || kTableMask != String::kMaxAsciiCharCode) {
+    __ mov(ebx, current_character());
+    __ and_(ebx, kTableSize - 1);
+    index = ebx;
+  }
+  __ cmpb(FieldOperand(eax, index, times_1, ByteArray::kHeaderSize), 0);
+  BranchOrBacktrack(not_equal, on_bit_set);
+}
+
+
 bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
                                                           Label* on_no_match) {
   // Range checks (c in min..max) are generally implemented by an unsigned
diff --git a/src/ia32/regexp-macro-assembler-ia32.h b/src/ia32/regexp-macro-assembler-ia32.h
index d504470..78cd069 100644
--- a/src/ia32/regexp-macro-assembler-ia32.h
+++ b/src/ia32/regexp-macro-assembler-ia32.h
@@ -78,6 +78,14 @@
                                               uc16 minus,
                                               uc16 mask,
                                               Label* on_not_equal);
+  virtual void CheckCharacterInRange(uc16 from,
+                                     uc16 to,
+                                     Label* on_in_range);
+  virtual void CheckCharacterNotInRange(uc16 from,
+                                        uc16 to,
+                                        Label* on_not_in_range);
+  virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
+
   // Checks whether the given offset from the current position is before
   // the end of the string.
   virtual void CheckPosition(int cp_offset, Label* on_outside_input);
diff --git a/src/ic.cc b/src/ic.cc
index 68bc30f..b8d4b40 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -296,58 +296,44 @@
 }
 
 
+static int ComputeTypeInfoCountDelta(IC::State old_state, IC::State new_state) {
+  bool was_uninitialized =
+      old_state == UNINITIALIZED || old_state == PREMONOMORPHIC;
+  bool is_uninitialized =
+      new_state == UNINITIALIZED || new_state == PREMONOMORPHIC;
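+  // E.g. UNINITIALIZED -> MONOMORPHIC yields +1, MONOMORPHIC ->
+  // PREMONOMORPHIC yields -1, and any transition between two initialized
+  // states yields 0.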
+  return (was_uninitialized && !is_uninitialized) ?  1 :
+         (!was_uninitialized && is_uninitialized) ? -1 : 0;
+}
+
+
 void IC::PostPatching(Address address, Code* target, Code* old_target) {
-  if (FLAG_type_info_threshold > 0) {
-    if (old_target->is_inline_cache_stub() &&
-        target->is_inline_cache_stub()) {
-      State old_state = old_target->ic_state();
-      State new_state = target->ic_state();
-      bool was_uninitialized =
-          old_state == UNINITIALIZED || old_state == PREMONOMORPHIC;
-      bool is_uninitialized =
-          new_state == UNINITIALIZED || new_state == PREMONOMORPHIC;
-      int delta = 0;
-      if (was_uninitialized && !is_uninitialized) {
-        delta = 1;
-      } else if (!was_uninitialized && is_uninitialized) {
-        delta = -1;
-      }
-      if (delta != 0) {
-        Code* host = target->GetHeap()->isolate()->
-            inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
-        // Not all Code objects have TypeFeedbackInfo.
-        if (host->type_feedback_info()->IsTypeFeedbackInfo()) {
-          TypeFeedbackInfo* info =
-              TypeFeedbackInfo::cast(host->type_feedback_info());
-          info->set_ic_with_typeinfo_count(
-              info->ic_with_typeinfo_count() + delta);
-        }
-      }
+  if (FLAG_type_info_threshold == 0 && !FLAG_watch_ic_patching) {
+    return;
+  }
+  Code* host = target->GetHeap()->isolate()->
+      inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
+  if (host->kind() != Code::FUNCTION) return;
+
+  if (FLAG_type_info_threshold > 0 &&
+      old_target->is_inline_cache_stub() &&
+      target->is_inline_cache_stub()) {
+    int delta = ComputeTypeInfoCountDelta(old_target->ic_state(),
+                                          target->ic_state());
+    // Not all Code objects have TypeFeedbackInfo.
+    if (delta != 0 && host->type_feedback_info()->IsTypeFeedbackInfo()) {
+      TypeFeedbackInfo* info =
+          TypeFeedbackInfo::cast(host->type_feedback_info());
+      info->set_ic_with_type_info_count(
+          info->ic_with_type_info_count() + delta);
     }
   }
   if (FLAG_watch_ic_patching) {
+    host->set_profiler_ticks(0);
     Isolate::Current()->runtime_profiler()->NotifyICChanged();
-    // We do not want to optimize until the ICs have settled down,
-    // so when they are patched, we postpone optimization for the
-    // current function and the functions above it on the stack that
-    // might want to inline this one.
-    StackFrameIterator it;
-    if (it.done()) return;
-    it.Advance();
-    static const int kStackFramesToMark = Compiler::kMaxInliningLevels - 1;
-    for (int i = 0; i < kStackFramesToMark; ++i) {
-      if (it.done()) return;
-      StackFrame* raw_frame = it.frame();
-      if (raw_frame->is_java_script()) {
-        JSFunction* function =
-            JSFunction::cast(JavaScriptFrame::cast(raw_frame)->function());
-        if (function->IsOptimized()) continue;
-        SharedFunctionInfo* shared = function->shared();
-        shared->set_profiler_ticks(0);
-      }
-      it.Advance();
-    }
   }
+  // TODO(2029): When an optimized function is patched, it would
+  // be nice to propagate the corresponding type information to its
+  // unoptimized version for the benefit of later inlining.
 }
 
 
@@ -1374,7 +1360,7 @@
       // Strict mode doesn't allow setting non-existent global property
       // or an assignment to a read only property.
       if (strict_mode == kStrictMode) {
-        if (lookup.IsFound() && lookup.IsReadOnly()) {
+        if (lookup.IsProperty() && lookup.IsReadOnly()) {
           return TypeError("strict_read_only_property", object, name);
         } else if (IsContextual(object)) {
           return ReferenceError("not_defined", name);
diff --git a/src/interpreter-irregexp.cc b/src/interpreter-irregexp.cc
index b337e88..3a92b84 100644
--- a/src/interpreter-irregexp.cc
+++ b/src/interpreter-irregexp.cc
@@ -33,8 +33,9 @@
 #include "utils.h"
 #include "ast.h"
 #include "bytecodes-irregexp.h"
-#include "jsregexp.h"
 #include "interpreter-irregexp.h"
+#include "jsregexp.h"
+#include "regexp-macro-assembler.h"
 
 namespace v8 {
 namespace internal {
@@ -449,6 +450,37 @@
         }
         break;
       }
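+      // The next two bytecodes branch to the 32-bit target at pc + 8 when
+      // current_char lies inside (for CHECK_CHAR_IN_RANGE) or outside
+      // (for CHECK_CHAR_NOT_IN_RANGE) the inclusive range [from, to].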
+      BYTECODE(CHECK_CHAR_IN_RANGE) {
+        uint32_t from = Load16Aligned(pc + 4);
+        uint32_t to = Load16Aligned(pc + 6);
+        if (from <= current_char && current_char <= to) {
+          pc = code_base + Load32Aligned(pc + 8);
+        } else {
+          pc += BC_CHECK_CHAR_IN_RANGE_LENGTH;
+        }
+        break;
+      }
+      BYTECODE(CHECK_CHAR_NOT_IN_RANGE) {
+        uint32_t from = Load16Aligned(pc + 4);
+        uint32_t to = Load16Aligned(pc + 6);
+        if (from > current_char || current_char > to) {
+          pc = code_base + Load32Aligned(pc + 8);
+        } else {
+          pc += BC_CHECK_CHAR_NOT_IN_RANGE_LENGTH;
+        }
+        break;
+      }
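+      // Look up current_char's bit in the table embedded at pc + 8 and
+      // branch to the target at pc + 4 when the bit is set.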
+      BYTECODE(CHECK_BIT_IN_TABLE) {
+        int mask = RegExpMacroAssembler::kTableMask;
+        byte b = pc[8 + ((current_char & mask) >> kBitsPerByteLog2)];
+        int bit = (current_char & (kBitsPerByte - 1));
+        if ((b & (1 << bit)) != 0) {
+          pc = code_base + Load32Aligned(pc + 4);
+        } else {
+          pc += BC_CHECK_BIT_IN_TABLE_LENGTH;
+        }
+        break;
+      }
       BYTECODE(CHECK_LT) {
         uint32_t limit = (insn >> BYTECODE_SHIFT);
         if (current_char < limit) {
@@ -488,59 +520,6 @@
           pc += BC_CHECK_REGISTER_EQ_POS_LENGTH;
         }
         break;
-      BYTECODE(LOOKUP_MAP1) {
-        // Look up character in a bitmap.  If we find a 0, then jump to the
-        // location at pc + 8.  Otherwise fall through!
-        int index = current_char - (insn >> BYTECODE_SHIFT);
-        byte map = code_base[Load32Aligned(pc + 4) + (index >> 3)];
-        map = ((map >> (index & 7)) & 1);
-        if (map == 0) {
-          pc = code_base + Load32Aligned(pc + 8);
-        } else {
-          pc += BC_LOOKUP_MAP1_LENGTH;
-        }
-        break;
-      }
-      BYTECODE(LOOKUP_MAP2) {
-        // Look up character in a half-nibble map.  If we find 00, then jump to
-        // the location at pc + 8.   If we find 01 then jump to location at
-        // pc + 11, etc.
-        int index = (current_char - (insn >> BYTECODE_SHIFT)) << 1;
-        byte map = code_base[Load32Aligned(pc + 3) + (index >> 3)];
-        map = ((map >> (index & 7)) & 3);
-        if (map < 2) {
-          if (map == 0) {
-            pc = code_base + Load32Aligned(pc + 8);
-          } else {
-            pc = code_base + Load32Aligned(pc + 12);
-          }
-        } else {
-          if (map == 2) {
-            pc = code_base + Load32Aligned(pc + 16);
-          } else {
-            pc = code_base + Load32Aligned(pc + 20);
-          }
-        }
-        break;
-      }
-      BYTECODE(LOOKUP_MAP8) {
-        // Look up character in a byte map.  Use the byte as an index into a
-        // table that follows this instruction immediately.
-        int index = current_char - (insn >> BYTECODE_SHIFT);
-        byte map = code_base[Load32Aligned(pc + 4) + index];
-        const byte* new_pc = code_base + Load32Aligned(pc + 8) + (map << 2);
-        pc = code_base + Load32Aligned(new_pc);
-        break;
-      }
-      BYTECODE(LOOKUP_HI_MAP8) {
-        // Look up high byte of this character in a byte map.  Use the byte as
-        // an index into a table that follows this instruction immediately.
-        int index = (current_char >> 8) - (insn >> BYTECODE_SHIFT);
-        byte map = code_base[Load32Aligned(pc + 4) + index];
-        const byte* new_pc = code_base + Load32Aligned(pc + 8) + (map << 2);
-        pc = code_base + Load32Aligned(new_pc);
-        break;
-      }
       BYTECODE(CHECK_NOT_REGS_EQUAL)
         if (registers[insn >> BYTECODE_SHIFT] ==
             registers[Load32Aligned(pc + 4)]) {
diff --git a/src/isolate.cc b/src/isolate.cc
index 3dfcbb5..625cc56 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -38,9 +38,11 @@
 #include "heap-profiler.h"
 #include "hydrogen.h"
 #include "isolate.h"
+#include "lazy-instance.h"
 #include "lithium-allocator.h"
 #include "log.h"
 #include "messages.h"
+#include "platform.h"
 #include "regexp-stack.h"
 #include "runtime-profiler.h"
 #include "scopeinfo.h"
@@ -55,6 +57,31 @@
 namespace v8 {
 namespace internal {
 
+struct GlobalState {
+  Thread::LocalStorageKey per_isolate_thread_data_key;
+  Thread::LocalStorageKey isolate_key;
+  Thread::LocalStorageKey thread_id_key;
+  Isolate* default_isolate;
+  Isolate::ThreadDataTable* thread_data_table;
+  Mutex* mutex;
+};
+
+struct InitializeGlobalState {
+  static void Construct(GlobalState* state) {
+    state->isolate_key = Thread::CreateThreadLocalKey();
+    state->thread_id_key = Thread::CreateThreadLocalKey();
+    state->per_isolate_thread_data_key = Thread::CreateThreadLocalKey();
+    state->thread_data_table = new Isolate::ThreadDataTable();
+    state->default_isolate = new Isolate();
+    state->mutex = OS::CreateMutex();
+    // Can't use SetIsolateThreadLocals(default_isolate_, NULL) here
+    // because a non-null thread data may already be set.
+    Thread::SetThreadLocal(state->isolate_key, state->default_isolate);
+  }
+};
+
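+// Process-wide state, created lazily on first access so that no static
+// constructor is required; InitializeGlobalState::Construct runs exactly
+// once.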
+static LazyInstance<GlobalState, InitializeGlobalState>::type global_state;
+
 Atomic32 ThreadId::highest_thread_id_ = 0;
 
 int ThreadId::AllocateThreadId() {
@@ -64,10 +91,11 @@
 
 
 int ThreadId::GetCurrentThreadId() {
-  int thread_id = Thread::GetThreadLocalInt(Isolate::thread_id_key_);
+  const GlobalState& global = global_state.Get();
+  int thread_id = Thread::GetThreadLocalInt(global.thread_id_key);
   if (thread_id == 0) {
     thread_id = AllocateThreadId();
-    Thread::SetThreadLocalInt(Isolate::thread_id_key_, thread_id);
+    Thread::SetThreadLocalInt(global.thread_id_key, thread_id);
   }
   return thread_id;
 }
@@ -311,44 +339,16 @@
   storage->LinkTo(&free_list_);
 }
 
-
-Isolate* Isolate::default_isolate_ = NULL;
-Thread::LocalStorageKey Isolate::isolate_key_;
-Thread::LocalStorageKey Isolate::thread_id_key_;
-Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
-Mutex* Isolate::process_wide_mutex_ = OS::CreateMutex();
-Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL;
-
-
-class IsolateInitializer {
- public:
-  IsolateInitializer() {
-    Isolate::EnsureDefaultIsolate();
-  }
-};
-
-static IsolateInitializer* EnsureDefaultIsolateAllocated() {
-  // TODO(isolates): Use the system threading API to do this once?
-  static IsolateInitializer static_initializer;
-  return &static_initializer;
-}
-
-// This variable only needed to trigger static intialization.
-static IsolateInitializer* static_initializer = EnsureDefaultIsolateAllocated();
-
-
-
-
-
 Isolate::PerIsolateThreadData* Isolate::AllocatePerIsolateThreadData(
     ThreadId thread_id) {
   ASSERT(!thread_id.Equals(ThreadId::Invalid()));
   PerIsolateThreadData* per_thread = new PerIsolateThreadData(this, thread_id);
   {
-    ScopedLock lock(process_wide_mutex_);
-    ASSERT(thread_data_table_->Lookup(this, thread_id) == NULL);
-    thread_data_table_->Insert(per_thread);
-    ASSERT(thread_data_table_->Lookup(this, thread_id) == per_thread);
+    GlobalState* const global = global_state.Pointer();
+    ScopedLock lock(global->mutex);
+    ASSERT(global->thread_data_table->Lookup(this, thread_id) == NULL);
+    global->thread_data_table->Insert(per_thread);
+    ASSERT(global->thread_data_table->Lookup(this, thread_id) == per_thread);
   }
   return per_thread;
 }
@@ -359,8 +359,9 @@
   ThreadId thread_id = ThreadId::Current();
   PerIsolateThreadData* per_thread = NULL;
   {
-    ScopedLock lock(process_wide_mutex_);
-    per_thread = thread_data_table_->Lookup(this, thread_id);
+    GlobalState* const global = global_state.Pointer();
+    ScopedLock lock(global->mutex);
+    per_thread = global->thread_data_table->Lookup(this, thread_id);
     if (per_thread == NULL) {
       per_thread = AllocatePerIsolateThreadData(thread_id);
     }
@@ -373,26 +374,25 @@
   ThreadId thread_id = ThreadId::Current();
   PerIsolateThreadData* per_thread = NULL;
   {
-    ScopedLock lock(process_wide_mutex_);
-    per_thread = thread_data_table_->Lookup(this, thread_id);
+    GlobalState* const global = global_state.Pointer();
+    ScopedLock lock(global->mutex);
+    per_thread = global->thread_data_table->Lookup(this, thread_id);
   }
   return per_thread;
 }
 
 
+bool Isolate::IsDefaultIsolate() const {
+  return this == global_state.Get().default_isolate;
+}
+
+
 void Isolate::EnsureDefaultIsolate() {
-  ScopedLock lock(process_wide_mutex_);
-  if (default_isolate_ == NULL) {
-    isolate_key_ = Thread::CreateThreadLocalKey();
-    thread_id_key_ = Thread::CreateThreadLocalKey();
-    per_isolate_thread_data_key_ = Thread::CreateThreadLocalKey();
-    thread_data_table_ = new Isolate::ThreadDataTable();
-    default_isolate_ = new Isolate();
-  }
+  GlobalState* const global = global_state.Pointer();
   // Can't use SetIsolateThreadLocals(default_isolate_, NULL) here
-  // becase a non-null thread data may be already set.
-  if (Thread::GetThreadLocal(isolate_key_) == NULL) {
-    Thread::SetThreadLocal(isolate_key_, default_isolate_);
+  // because a non-null thread data may already be set.
+  if (Thread::GetThreadLocal(global->isolate_key) == NULL) {
+    Thread::SetThreadLocal(global->isolate_key, global->default_isolate);
   }
 }
 
@@ -400,32 +400,48 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
 Debugger* Isolate::GetDefaultIsolateDebugger() {
   EnsureDefaultIsolate();
-  return default_isolate_->debugger();
+  return global_state.Pointer()->default_isolate->debugger();
 }
 #endif
 
 
 StackGuard* Isolate::GetDefaultIsolateStackGuard() {
   EnsureDefaultIsolate();
-  return default_isolate_->stack_guard();
+  return global_state.Pointer()->default_isolate->stack_guard();
+}
+
+
+Thread::LocalStorageKey Isolate::isolate_key() {
+  return global_state.Get().isolate_key;
+}
+
+
+Thread::LocalStorageKey Isolate::thread_id_key() {
+  return global_state.Get().thread_id_key;
+}
+
+
+Thread::LocalStorageKey Isolate::per_isolate_thread_data_key() {
+  return global_state.Get().per_isolate_thread_data_key;
 }
 
 
 void Isolate::EnterDefaultIsolate() {
   EnsureDefaultIsolate();
-  ASSERT(default_isolate_ != NULL);
+  Isolate* const default_isolate = global_state.Pointer()->default_isolate;
+  ASSERT(default_isolate != NULL);
 
   PerIsolateThreadData* data = CurrentPerIsolateThreadData();
   // If not yet in default isolate - enter it.
-  if (data == NULL || data->isolate() != default_isolate_) {
-    default_isolate_->Enter();
+  if (data == NULL || data->isolate() != default_isolate) {
+    default_isolate->Enter();
   }
 }
 
 
 Isolate* Isolate::GetDefaultIsolateForLocking() {
   EnsureDefaultIsolate();
-  return default_isolate_;
+  return global_state.Pointer()->default_isolate;
 }
 
 
@@ -1548,8 +1564,8 @@
 
   Deinit();
 
-  { ScopedLock lock(process_wide_mutex_);
-    thread_data_table_->RemoveAllThreads(this);
+  { ScopedLock lock(global_state.Pointer()->mutex);
+    global_state.Pointer()->thread_data_table->RemoveAllThreads(this);
   }
 
   if (!IsDefaultIsolate()) {
@@ -1602,8 +1618,9 @@
 
 void Isolate::SetIsolateThreadLocals(Isolate* isolate,
                                      PerIsolateThreadData* data) {
-  Thread::SetThreadLocal(isolate_key_, isolate);
-  Thread::SetThreadLocal(per_isolate_thread_data_key_, data);
+  const GlobalState& global = global_state.Get();
+  Thread::SetThreadLocal(global.isolate_key, isolate);
+  Thread::SetThreadLocal(global.per_isolate_thread_data_key, data);
 }
 
 
diff --git a/src/isolate.h b/src/isolate.h
index 78ad08d..0c5a54c 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -430,19 +430,25 @@
   // not currently set).
   static PerIsolateThreadData* CurrentPerIsolateThreadData() {
     return reinterpret_cast<PerIsolateThreadData*>(
-        Thread::GetThreadLocal(per_isolate_thread_data_key_));
+        Thread::GetThreadLocal(per_isolate_thread_data_key()));
   }
 
   // Returns the isolate inside which the current thread is running.
   INLINE(static Isolate* Current()) {
+    const Thread::LocalStorageKey key = isolate_key();
     Isolate* isolate = reinterpret_cast<Isolate*>(
-        Thread::GetExistingThreadLocal(isolate_key_));
+        Thread::GetExistingThreadLocal(key));
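+    // No isolate has been entered on this thread yet: fall back to (and
+    // lazily create) the default isolate.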
+    if (!isolate) {
+      EnsureDefaultIsolate();
+      isolate = reinterpret_cast<Isolate*>(
+          Thread::GetExistingThreadLocal(key));
+    }
     ASSERT(isolate != NULL);
     return isolate;
   }
 
   INLINE(static Isolate* UncheckedCurrent()) {
-    return reinterpret_cast<Isolate*>(Thread::GetThreadLocal(isolate_key_));
+    return reinterpret_cast<Isolate*>(Thread::GetThreadLocal(isolate_key()));
   }
 
   // Usually called by Init(), but can be called early e.g. to allow
@@ -464,7 +470,7 @@
   // for legacy API reasons.
   void TearDown();
 
-  bool IsDefaultIsolate() const { return this == default_isolate_; }
+  bool IsDefaultIsolate() const;
 
   // Ensures that process-wide resources and the default isolate have been
   // allocated. It is only necessary to call this method in rare cases, for
@@ -489,14 +495,12 @@
   // Returns the key used to store the pointer to the current isolate.
   // Used internally for V8 threads that do not execute JavaScript but still
   // are part of the domain of an isolate (like the context switcher).
-  static Thread::LocalStorageKey isolate_key() {
-    return isolate_key_;
-  }
+  static Thread::LocalStorageKey isolate_key();
 
   // Returns the key used to store process-wide thread IDs.
-  static Thread::LocalStorageKey thread_id_key() {
-    return thread_id_key_;
-  }
+  static Thread::LocalStorageKey thread_id_key();
+
+  static Thread::LocalStorageKey per_isolate_thread_data_key();
 
   // If a client attempts to create a Locker without specifying an isolate,
   // we assume that the client is using legacy behavior. Set up the current
@@ -1033,6 +1037,9 @@
  private:
   Isolate();
 
+  friend struct GlobalState;
+  friend struct InitializeGlobalState;
+
   // The per-process lock should be acquired before the ThreadDataTable is
   // modified.
   class ThreadDataTable {
@@ -1075,16 +1082,6 @@
     DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
   };
 
-  // This mutex protects highest_thread_id_, thread_data_table_ and
-  // default_isolate_.
-  static Mutex* process_wide_mutex_;
-
-  static Thread::LocalStorageKey per_isolate_thread_data_key_;
-  static Thread::LocalStorageKey isolate_key_;
-  static Thread::LocalStorageKey thread_id_key_;
-  static Isolate* default_isolate_;
-  static ThreadDataTable* thread_data_table_;
-
   void Deinit();
 
   static void SetIsolateThreadLocals(Isolate* isolate,
@@ -1106,7 +1103,7 @@
   // If one does not yet exist, allocate a new one.
   PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();
 
-// PreInits and returns a default isolate. Needed when a new thread tries
+  // PreInits and returns a default isolate. Needed when a new thread tries
   // to create a Locker for the first time (the lock itself is in the isolate).
   static Isolate* GetDefaultIsolateForLocking();
 
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 8ccbae4..21e3411 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -1534,6 +1534,357 @@
 }
 
 
+static void EmitBoundaryTest(RegExpMacroAssembler* masm,
+                             int border,
+                             Label* fall_through,
+                             Label* above_or_equal,
+                             Label* below) {
+  if (below != fall_through) {
+    masm->CheckCharacterLT(border, below);
+    if (above_or_equal != fall_through) masm->GoTo(above_or_equal);
+  } else {
+    masm->CheckCharacterGT(border - 1, above_or_equal);
+  }
+}
+
+
+static void EmitDoubleBoundaryTest(RegExpMacroAssembler* masm,
+                                   int first,
+                                   int last,
+                                   Label* fall_through,
+                                   Label* in_range,
+                                   Label* out_of_range) {
+  if (in_range == fall_through) {
+    if (first == last) {
+      masm->CheckNotCharacter(first, out_of_range);
+    } else {
+      masm->CheckCharacterNotInRange(first, last, out_of_range);
+    }
+  } else {
+    if (first == last) {
+      masm->CheckCharacter(first, in_range);
+    } else {
+      masm->CheckCharacterInRange(first, last, in_range);
+    }
+    if (out_of_range != fall_through) masm->GoTo(out_of_range);
+  }
+}
+
+
+// even_label is for ranges[i] to ranges[i + 1] where i - start_index is even.
+// odd_label is for ranges[i] to ranges[i + 1] where i - start_index is odd.
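+// For example, with ranges == {48, 58, 65} and start_index == 0, characters
+// 48..57 go to even_label and characters 58..64 go to odd_label.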
+static void EmitUseLookupTable(
+    RegExpMacroAssembler* masm,
+    ZoneList<int>* ranges,
+    int start_index,
+    int end_index,
+    int min_char,
+    Label* fall_through,
+    Label* even_label,
+    Label* odd_label) {
+  static const int kSize = RegExpMacroAssembler::kTableSize;
+  static const int kMask = RegExpMacroAssembler::kTableMask;
+
+  int base = (min_char & ~kMask);
+  USE(base);
+
+  // Assert that everything is on one kTableSize page.
+  for (int i = start_index; i <= end_index; i++) {
+    ASSERT_EQ(ranges->at(i) & ~kMask, base);
+  }
+  ASSERT(start_index == 0 || (ranges->at(start_index - 1) & ~kMask) <= base);
+
+  char templ[kSize];
+  Label* on_bit_set;
+  Label* on_bit_clear;
+  int bit;
+  if (even_label == fall_through) {
+    on_bit_set = odd_label;
+    on_bit_clear = even_label;
+    bit = 1;
+  } else {
+    on_bit_set = even_label;
+    on_bit_clear = odd_label;
+    bit = 0;
+  }
+  for (int i = 0; i < (ranges->at(start_index) & kMask) && i < kSize; i++) {
+    templ[i] = bit;
+  }
+  int j = 0;
+  bit ^= 1;
+  for (int i = start_index; i < end_index; i++) {
+    for (j = (ranges->at(i) & kMask); j < (ranges->at(i + 1) & kMask); j++) {
+      templ[j] = bit;
+    }
+    bit ^= 1;
+  }
+  for (int i = j; i < kSize; i++) {
+    templ[i] = bit;
+  }
+  // TODO(erikcorry): Cache these.
+  Handle<ByteArray> ba = FACTORY->NewByteArray(kSize, TENURED);
+  for (int i = 0; i < kSize; i++) {
+    ba->set(i, templ[i]);
+  }
+  masm->CheckBitInTable(ba, on_bit_set);
+  if (on_bit_clear != fall_through) masm->GoTo(on_bit_clear);
+}
+
+
+static void CutOutRange(RegExpMacroAssembler* masm,
+                        ZoneList<int>* ranges,
+                        int start_index,
+                        int end_index,
+                        int cut_index,
+                        Label* even_label,
+                        Label* odd_label) {
+  bool odd = (((cut_index - start_index) & 1) == 1);
+  Label* in_range_label = odd ? odd_label : even_label;
+  Label dummy;
+  EmitDoubleBoundaryTest(masm,
+                         ranges->at(cut_index),
+                         ranges->at(cut_index + 1) - 1,
+                         &dummy,
+                         in_range_label,
+                         &dummy);
+  ASSERT(!dummy.is_linked());
+  // Cut out the single range by rewriting the array.  This creates a new
+  // range that is a merger of the two ranges on either side of the one we
+  // are cutting out.  The oddity of the labels is preserved.
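+  // For example, cutting cut_index == 2 out of {10, 20, 30, 40, 50} leaves
+  // {10, 20, 50} in the slice [start_index + 1, end_index - 1], merging
+  // [20, 29] and [40, 49] into a single range with its label parity intact.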
+  for (int j = cut_index; j > start_index; j--) {
+    ranges->at(j) = ranges->at(j - 1);
+  }
+  for (int j = cut_index + 1; j < end_index; j++) {
+    ranges->at(j) = ranges->at(j + 1);
+  }
+}
+
+
+// Unicode case.  Split the search space into kSize spaces that are handled
+// with recursion.
+static void SplitSearchSpace(ZoneList<int>* ranges,
+                             int start_index,
+                             int end_index,
+                             int* new_start_index,
+                             int* new_end_index,
+                             int* border) {
+  static const int kSize = RegExpMacroAssembler::kTableSize;
+  static const int kMask = RegExpMacroAssembler::kTableMask;
+
+  int first = ranges->at(start_index);
+  int last = ranges->at(end_index) - 1;
+
+  *new_start_index = start_index;
+  *border = (ranges->at(start_index) & ~kMask) + kSize;
+  while (*new_start_index < end_index) {
+    if (ranges->at(*new_start_index) > *border) break;
+    (*new_start_index)++;
+  }
+  // new_start_index is the index of the first edge that is beyond the
+  // current kSize space.
+
+  // For very large search spaces we do a binary chop search of the non-ASCII
+  // space instead of just going to the end of the current kSize space.  The
+  // heuristics are complicated a little by the fact that any 128-character
+  // encoding space can be quickly tested with a table lookup, so we don't
+  // wish to do binary chop search at a smaller granularity than that.  A
+  // 128-character space can take up a lot of space in the ranges array if,
+// for example, we only want to match every second character (e.g. the lower
+  // case characters on some Unicode pages).
+  int binary_chop_index = (end_index + start_index) / 2;
+  // The first test ensures that we get to the code that handles the ASCII
+  // range with a single not-taken branch, speeding up this important
+  // character range (even non-ASCII charset-based text has spaces and
+  // punctuation).
+  if (*border - 1 > String::kMaxAsciiCharCode &&  // ASCII case.
+      end_index - start_index > (*new_start_index - start_index) * 2 &&
+      last - first > kSize * 2 &&
+      binary_chop_index > *new_start_index &&
+      ranges->at(binary_chop_index) >= first + 2 * kSize) {
+    int scan_forward_for_section_border = binary_chop_index;
+    int new_border = (ranges->at(binary_chop_index) | kMask) + 1;
+
+    while (scan_forward_for_section_border < end_index) {
+      if (ranges->at(scan_forward_for_section_border) > new_border) {
+        *new_start_index = scan_forward_for_section_border;
+        *border = new_border;
+        break;
+      }
+      scan_forward_for_section_border++;
+    }
+  }
+
+  ASSERT(*new_start_index > start_index);
+  *new_end_index = *new_start_index - 1;
+  if (ranges->at(*new_end_index) == *border) {
+    (*new_end_index)--;
+  }
+  if (*border >= ranges->at(end_index)) {
+    *border = ranges->at(end_index);
+    *new_start_index = end_index;  // Won't be used.
+    *new_end_index = end_index - 1;
+  }
+}
+
+
+// Gets a series of segment boundaries representing a character class.  If the
+// character is in the range between an even and an odd boundary (counting from
+// start_index) then go to even_label, otherwise go to odd_label.  We already
+// know that the character is in the range of min_char to max_char inclusive.
+// Either label can be NULL indicating backtracking.  Either label can also be
+// equal to the fall_through label.
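+// For example, with ranges == {'a', 'z' + 1}, start_index == 0 and
+// end_index == 1, characters 'a'..'z' go to even_label and all other
+// characters in [min_char, max_char] go to odd_label.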
+static void GenerateBranches(RegExpMacroAssembler* masm,
+                             ZoneList<int>* ranges,
+                             int start_index,
+                             int end_index,
+                             uc16 min_char,
+                             uc16 max_char,
+                             Label* fall_through,
+                             Label* even_label,
+                             Label* odd_label) {
+  int first = ranges->at(start_index);
+  int last = ranges->at(end_index) - 1;
+
+  ASSERT_LT(min_char, first);
+
+  // Just need to test if the character is before or on-or-after
+  // a particular character.
+  if (start_index == end_index) {
+    EmitBoundaryTest(masm, first, fall_through, even_label, odd_label);
+    return;
+  }
+
+  // Another almost trivial case:  There is one interval in the middle that is
+  // different from the end intervals.
+  if (start_index + 1 == end_index) {
+    EmitDoubleBoundaryTest(
+        masm, first, last, fall_through, even_label, odd_label);
+    return;
+  }
+
+  // It's not worth using table lookup if there are very few intervals in the
+  // character class.
+  if (end_index - start_index <= 6) {
+    // It is faster to test for individual characters, so we look for those
+    // first, then try arbitrary ranges in the second round.
+    static int kNoCutIndex = -1;
+    int cut = kNoCutIndex;
+    for (int i = start_index; i < end_index; i++) {
+      if (ranges->at(i) == ranges->at(i + 1) - 1) {
+        cut = i;
+        break;
+      }
+    }
+    if (cut == kNoCutIndex) cut = start_index;
+    CutOutRange(
+        masm, ranges, start_index, end_index, cut, even_label, odd_label);
+    ASSERT_GE(end_index - start_index, 2);
+    GenerateBranches(masm,
+                     ranges,
+                     start_index + 1,
+                     end_index - 1,
+                     min_char,
+                     max_char,
+                     fall_through,
+                     even_label,
+                     odd_label);
+    return;
+  }
+
+  // If there are a lot of intervals in the regexp, then we will use tables to
+  // determine whether the character is inside or outside the character class.
+  static const int kBits = RegExpMacroAssembler::kTableSizeBits;
+
+  if ((max_char >> kBits) == (min_char >> kBits)) {
+    EmitUseLookupTable(masm,
+                       ranges,
+                       start_index,
+                       end_index,
+                       min_char,
+                       fall_through,
+                       even_label,
+                       odd_label);
+    return;
+  }
+
+  if ((min_char >> kBits) != (first >> kBits)) {
+    masm->CheckCharacterLT(first, odd_label);
+    GenerateBranches(masm,
+                     ranges,
+                     start_index + 1,
+                     end_index,
+                     first,
+                     max_char,
+                     fall_through,
+                     odd_label,
+                     even_label);
+    return;
+  }
+
+  int new_start_index = 0;
+  int new_end_index = 0;
+  int border = 0;
+
+  SplitSearchSpace(ranges,
+                   start_index,
+                   end_index,
+                   &new_start_index,
+                   &new_end_index,
+                   &border);
+
+  Label handle_rest;
+  Label* above = &handle_rest;
+  if (border == last + 1) {
+    // We didn't find any section that started after the limit, so everything
+    // above the border is one of the terminal labels.
+    above = (end_index & 1) != (start_index & 1) ? odd_label : even_label;
+    ASSERT(new_end_index == end_index - 1);
+  }
+
+  ASSERT_LE(start_index, new_end_index);
+  ASSERT_LE(new_start_index, end_index);
+  ASSERT_LT(start_index, new_start_index);
+  ASSERT_LT(new_end_index, end_index);
+  ASSERT(new_end_index + 1 == new_start_index ||
+         (new_end_index + 2 == new_start_index &&
+          border == ranges->at(new_end_index + 1)));
+  ASSERT_LT(min_char, border - 1);
+  ASSERT_LT(border, max_char);
+  ASSERT_LT(ranges->at(new_end_index), border);
+  ASSERT(border < ranges->at(new_start_index) ||
+         (border == ranges->at(new_start_index) &&
+          new_start_index == end_index &&
+          new_end_index == end_index - 1 &&
+          border == last + 1));
+  ASSERT(new_start_index == 0 || border >= ranges->at(new_start_index - 1));
+
+  masm->CheckCharacterGT(border - 1, above);
+  Label dummy;
+  GenerateBranches(masm,
+                   ranges,
+                   start_index,
+                   new_end_index,
+                   min_char,
+                   border - 1,
+                   &dummy,
+                   even_label,
+                   odd_label);
+  if (handle_rest.is_linked()) {
+    masm->Bind(&handle_rest);
+    bool flip = (new_start_index & 1) != (start_index & 1);
+    GenerateBranches(masm,
+                     ranges,
+                     new_start_index,
+                     end_index,
+                     border,
+                     max_char,
+                     &dummy,
+                     flip ? odd_label : even_label,
+                     flip ? even_label : odd_label);
+  }
+}
+
+
 static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
                           RegExpCharacterClass* cc,
                           bool ascii,
@@ -1542,6 +1893,10 @@
                           bool check_offset,
                           bool preloaded) {
   ZoneList<CharacterRange>* ranges = cc->ranges();
+  if (!CharacterRange::IsCanonical(ranges)) {
+    CharacterRange::Canonicalize(ranges);
+  }
+
   int max_char;
   if (ascii) {
     max_char = String::kMaxAsciiCharCode;
@@ -1549,11 +1904,6 @@
     max_char = String::kMaxUtf16CodeUnit;
   }
 
-  Label success;
-
-  Label* char_is_in_class =
-      cc->is_negated() ? on_failure : &success;
-
   int range_count = ranges->length();
 
   int last_valid_range = range_count - 1;
@@ -1567,8 +1917,6 @@
 
   if (last_valid_range < 0) {
     if (!cc->is_negated()) {
-      // TODO(plesner): We can remove this when the node level does our
-      // ASCII optimizations for us.
       macro_assembler->GoTo(on_failure);
     }
     if (check_offset) {
@@ -1578,6 +1926,18 @@
   }
 
   if (last_valid_range == 0 &&
+      ranges->at(0).IsEverything(max_char)) {
+    if (cc->is_negated()) {
+      macro_assembler->GoTo(on_failure);
+    } else {
+      // This is a common case hit by non-anchored expressions.
+      if (check_offset) {
+        macro_assembler->CheckPosition(cp_offset, on_failure);
+      }
+    }
+    return;
+  }
+  if (last_valid_range == 0 &&
       !cc->is_negated() &&
       ranges->at(0).IsEverything(max_char)) {
     // This is a common case hit by non-anchored expressions.
@@ -1597,64 +1957,43 @@
       return;
   }
 
-  for (int i = 0; i < last_valid_range; i++) {
+
+  // A new list with ascending entries.  Each entry is a code unit
+  // where there is a boundary between code units that are part of
+  // the class and code units that are not.  Normally we insert an
+  // entry at zero which goes to the failure label, but if there
+  // was already one there we fall through for success on that entry.
+  // Subsequent entries have alternating meaning (success/failure).
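+  // For example, a non-negated [a-c] yields boundaries {'a', 'd'}:
+  // characters below 'a' fail, 'a'..'c' succeed, and 'd' upwards fail.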
+  ZoneList<int>* range_boundaries = new ZoneList<int>(last_valid_range);
+
+  bool zeroth_entry_is_failure = !cc->is_negated();
+
+  for (int i = 0; i <= last_valid_range; i++) {
     CharacterRange& range = ranges->at(i);
-    Label next_range;
-    uc16 from = range.from();
-    uc16 to = range.to();
-    if (from > max_char) {
-      continue;
-    }
-    if (to > max_char) to = max_char;
-    if (to == from) {
-      macro_assembler->CheckCharacter(to, char_is_in_class);
+    if (range.from() == 0) {
+      ASSERT_EQ(i, 0);
+      zeroth_entry_is_failure = !zeroth_entry_is_failure;
     } else {
-      if (from != 0) {
-        macro_assembler->CheckCharacterLT(from, &next_range);
-      }
-      if (to != max_char) {
-        macro_assembler->CheckCharacterLT(to + 1, char_is_in_class);
-      } else {
-        macro_assembler->GoTo(char_is_in_class);
-      }
+      range_boundaries->Add(range.from());
     }
-    macro_assembler->Bind(&next_range);
+    range_boundaries->Add(range.to() + 1);
+  }
+  int end_index = range_boundaries->length() - 1;
+  if (range_boundaries->at(end_index) > max_char) {
+    end_index--;
   }
 
-  CharacterRange& range = ranges->at(last_valid_range);
-  uc16 from = range.from();
-  uc16 to = range.to();
-
-  if (to > max_char) to = max_char;
-  ASSERT(to >= from);
-
-  if (to == from) {
-    if (cc->is_negated()) {
-      macro_assembler->CheckCharacter(to, on_failure);
-    } else {
-      macro_assembler->CheckNotCharacter(to, on_failure);
-    }
-  } else {
-    if (from != 0) {
-      if (cc->is_negated()) {
-        macro_assembler->CheckCharacterLT(from, &success);
-      } else {
-        macro_assembler->CheckCharacterLT(from, on_failure);
-      }
-    }
-    if (to != String::kMaxUtf16CodeUnit) {
-      if (cc->is_negated()) {
-        macro_assembler->CheckCharacterLT(to + 1, on_failure);
-      } else {
-        macro_assembler->CheckCharacterGT(to, on_failure);
-      }
-    } else {
-      if (cc->is_negated()) {
-        macro_assembler->GoTo(on_failure);
-      }
-    }
-  }
-  macro_assembler->Bind(&success);
+  Label fall_through;
+  GenerateBranches(macro_assembler,
+                   range_boundaries,
+                   0,  // start_index.
+                   end_index,
+                   0,  // min_char.
+                   max_char,
+                   &fall_through,
+                   zeroth_entry_is_failure ? &fall_through : on_failure,
+                   zeroth_entry_is_failure ? on_failure : &fall_through);
+  macro_assembler->Bind(&fall_through);
 }
 
 
@@ -4226,7 +4565,7 @@
     // as a "singleton block").
     unibrow::uchar range[unibrow::Ecma262UnCanonicalize::kMaxWidth];
     int pos = bottom;
-    while (pos < top) {
+    while (pos <= top) {
       int length = isolate->jsregexp_canonrange()->get(pos, '\0', range);
       uc16 block_end;
       if (length == 0) {
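
The rewrite above drops the linear chain of per-range comparisons in EmitCharClass in favor of a flattened, sorted list of boundaries handed to GenerateBranches (defined elsewhere in this change), which can emit a balanced tree of comparisons instead. A minimal sketch of the boundary-list construction, using std::vector in place of ZoneList and illustrative names:

    // For the class [a-ce-f] (ranges 97..99 and 101..102) this yields
    // {97, 100, 101, 103}: the code units where membership flips.
    #include <vector>

    struct Range { int from; int to; };

    static std::vector<int> RangeBoundaries(const std::vector<Range>& ranges,
                                            bool is_negated,
                                            bool* zeroth_entry_is_failure) {
      *zeroth_entry_is_failure = !is_negated;
      std::vector<int> boundaries;
      for (size_t i = 0; i < ranges.size(); i++) {
        if (ranges[i].from == 0) {
          // A range starting at 0 flips the meaning of the first interval
          // instead of contributing a boundary.
          *zeroth_entry_is_failure = !*zeroth_entry_is_failure;
        } else {
          boundaries.push_back(ranges[i].from);
        }
        boundaries.push_back(ranges[i].to + 1);
      }
      return boundaries;
    }
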
diff --git a/src/lazy-instance.h b/src/lazy-instance.h
new file mode 100644
index 0000000..09dfe21
--- /dev/null
+++ b/src/lazy-instance.h
@@ -0,0 +1,216 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The LazyInstance<Type, Traits> class manages a single instance of Type,
+// which will be lazily created on the first time it's accessed.  This class is
+// useful for places you would normally use a function-level static, but you
+// need to have guaranteed thread-safety.  The Type constructor will only ever
+// be called once, even if two threads are racing to create the object.  Get()
+// and Pointer() will always return the same, completely initialized instance.
+//
+// LazyInstance is completely thread safe, assuming that you create it safely.
+// The class was designed to be POD initialized, so it shouldn't require a
+// static constructor.  It really only makes sense to declare a LazyInstance as
+// a global variable using the LAZY_INSTANCE_INITIALIZER initializer.
+//
+// LazyInstance is similar to Singleton, except it does not have the singleton
+// property.  You can have multiple LazyInstances of the same type, and each
+// will manage a unique instance.  It also preallocates the space for Type, so
+// as to avoid allocating the Type instance on the heap.  This may help the
+// performance of creating the instance, and reduce heap fragmentation.  This
+// requires that Type be a complete type so we can determine the size.  See
+// the notes for advanced users below for more details.
+//
+// Example usage:
+//   static LazyInstance<MyClass>::type my_instance = LAZY_INSTANCE_INITIALIZER;
+//   void SomeMethod() {
+//     my_instance.Get().SomeMethod();  // MyClass::SomeMethod()
+//
+//     MyClass* ptr = my_instance.Pointer();
+//     ptr->DoDoDo();  // MyClass::DoDoDo
+//   }
+//
+// Additionally you can override the way your instance is constructed by
+// providing your own trait:
+// Example usage:
+//   struct MyCreateTrait {
+//     static void Construct(MyClass* allocated_ptr) {
+//       new (allocated_ptr) MyClass(/* extra parameters... */);
+//     }
+//   };
+//   static LazyInstance<MyClass, MyCreateTrait>::type my_instance =
+//      LAZY_INSTANCE_INITIALIZER;
+//
+// Notes for advanced users:
+// LazyInstance can actually be used in two different ways:
+//
+// - "Static mode" which is the default mode since it is the most efficient
+//   (no extra heap allocation). In this mode, the instance is statically
+//   allocated (stored in the global data section at compile time).
+//   The macro LAZY_STATIC_INSTANCE_INITIALIZER (= LAZY_INSTANCE_INITIALIZER)
+//   must be used to initialize static lazy instances.
+//
+// - "Dynamic mode". In this mode, the instance is dynamically allocated and
+//   constructed (using new) by default. This mode is useful if you have to
+//   deal with some code already allocating the instance for you (e.g.
+//   OS::Mutex() which returns a new private OS-dependent subclass of Mutex).
+//   The macro LAZY_DYNAMIC_INSTANCE_INITIALIZER must be used to initialize
+//   dynamic lazy instances.
+
+#ifndef V8_LAZY_INSTANCE_H_
+#define V8_LAZY_INSTANCE_H_
+
+#include "once.h"
+
+namespace v8 {
+namespace internal {
+
+#define LAZY_STATIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, {} }
+#define LAZY_DYNAMIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, 0 }
+
+// Default to static mode.
+#define LAZY_INSTANCE_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
+
+
+template <typename T>
+struct LeakyInstanceTrait {
+  static void Destroy(T* /* instance */) {}
+};
+
+
+// Traits that define how an instance is allocated and accessed.
+
+template <typename T>
+struct StaticallyAllocatedInstanceTrait {
+  typedef char StorageType[sizeof(T)];
+
+  static T* MutableInstance(StorageType* storage) {
+    return reinterpret_cast<T*>(storage);
+  }
+
+  template <typename ConstructTrait>
+  static void InitStorageUsingTrait(StorageType* storage) {
+    ConstructTrait::Construct(MutableInstance(storage));
+  }
+};
+
+
+template <typename T>
+struct DynamicallyAllocatedInstanceTrait {
+  typedef T* StorageType;
+
+  static T* MutableInstance(StorageType* storage) {
+    return *storage;
+  }
+
+  template <typename CreateTrait>
+  static void InitStorageUsingTrait(StorageType* storage) {
+    *storage = CreateTrait::Create();
+  }
+};
+
+
+template <typename T>
+struct DefaultConstructTrait {
+  // Constructs the provided object which was already allocated.
+  static void Construct(T* allocated_ptr) {
+    new(allocated_ptr) T();
+  }
+};
+
+
+template <typename T>
+struct DefaultCreateTrait {
+  static T* Create() {
+    return new T();
+  }
+};
+
+
+// TODO(pliard): Handle instances destruction (using global destructors).
+template <typename T, typename AllocationTrait, typename CreateTrait,
+          typename DestroyTrait  /* not used yet. */ >
+struct LazyInstanceImpl {
+ public:
+  typedef typename AllocationTrait::StorageType StorageType;
+
+ private:
+  static void InitInstance(StorageType* storage) {
+    AllocationTrait::template InitStorageUsingTrait<CreateTrait>(storage);
+  }
+
+  void Init() const {
+    CallOnce(&once_, &InitInstance, &storage_);
+  }
+
+ public:
+  T* Pointer() {
+    Init();
+    return AllocationTrait::MutableInstance(&storage_);
+  }
+
+  const T& Get() const {
+    Init();
+    return *AllocationTrait::MutableInstance(&storage_);
+  }
+
+  mutable OnceType once_;
+  // Note that the previous field, once_, is an OnceType (an AtomicWord),
+  // which guarantees the correct alignment of the storage field below.
+  mutable StorageType storage_;
+};
+
+
+template <typename T,
+          typename CreateTrait = DefaultConstructTrait<T>,
+          typename DestroyTrait = LeakyInstanceTrait<T> >
+struct LazyStaticInstance {
+  typedef LazyInstanceImpl<T, StaticallyAllocatedInstanceTrait<T>, CreateTrait,
+      DestroyTrait> type;
+};
+
+
+template <typename T,
+          typename CreateTrait = DefaultConstructTrait<T>,
+          typename DestroyTrait = LeakyInstanceTrait<T> >
+struct LazyInstance {
+  // A LazyInstance is a LazyStaticInstance.
+  typedef typename LazyStaticInstance<T, CreateTrait, DestroyTrait>::type type;
+};
+
+
+template <typename T,
+          typename CreateTrait = DefaultConstructTrait<T>,
+          typename DestroyTrait = LeakyInstanceTrait<T> >
+struct LazyDynamicInstance {
+  typedef LazyInstanceImpl<T, DynamicallyAllocatedInstanceTrait<T>, CreateTrait,
+      DestroyTrait> type;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_LAZY_INSTANCE_H_
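
LazyMutex, used by log.cc and the platform-*.cc ports later in this diff, is presumably a LazyDynamicInstance built on OS::CreateMutex(); its actual typedef lives in platform.h, outside this excerpt. A sketch under that assumption (CreateMutexTrait and my_mutex are illustrative names):

    struct CreateMutexTrait {
      static Mutex* Create() { return OS::CreateMutex(); }
    };

    // POD-initialized: no static constructor runs, and the Mutex itself is
    // only allocated on the first Pointer() call.
    typedef LazyDynamicInstance<Mutex, CreateMutexTrait>::type LazyMutex;

    static LazyMutex my_mutex = LAZY_DYNAMIC_INSTANCE_INITIALIZER;

    void Guarded() {
      ScopedLock lock(my_mutex.Pointer());
      // ... critical section ...
    }
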
diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc
index 83805dc..4396c73 100644
--- a/src/lithium-allocator.cc
+++ b/src/lithium-allocator.cc
@@ -46,29 +46,6 @@
 namespace v8 {
 namespace internal {
 
-
-#define DEFINE_OPERAND_CACHE(name, type)                      \
-  name name::cache[name::kNumCachedOperands];                 \
-  void name::SetUpCache() {                                   \
-    for (int i = 0; i < kNumCachedOperands; i++) {            \
-      cache[i].ConvertTo(type, i);                            \
-    }                                                         \
-  }                                                           \
-  static bool name##_initialize() {                           \
-    name::SetUpCache();                                       \
-    return true;                                              \
-  }                                                           \
-  static bool name##_cache_initialized = name##_initialize();
-
-DEFINE_OPERAND_CACHE(LConstantOperand, CONSTANT_OPERAND)
-DEFINE_OPERAND_CACHE(LStackSlot,       STACK_SLOT)
-DEFINE_OPERAND_CACHE(LDoubleStackSlot, DOUBLE_STACK_SLOT)
-DEFINE_OPERAND_CACHE(LRegister,        REGISTER)
-DEFINE_OPERAND_CACHE(LDoubleRegister,  DOUBLE_REGISTER)
-
-#undef DEFINE_OPERAND_CACHE
-
-
 static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) {
   return a.Value() < b.Value() ? a : b;
 }
diff --git a/src/lithium.cc b/src/lithium.cc
index 5a44fce..c41cce8 100644
--- a/src/lithium.cc
+++ b/src/lithium.cc
@@ -94,6 +94,37 @@
   }
 }
 
+#define DEFINE_OPERAND_CACHE(name, type)                      \
+  L##name* L##name::cache = NULL;                             \
+                                                              \
+  void L##name::SetUpCache() {                                \
+    if (cache) return;                                        \
+    cache = new L##name[kNumCachedOperands];                  \
+    for (int i = 0; i < kNumCachedOperands; i++) {            \
+      cache[i].ConvertTo(type, i);                            \
+    }                                                         \
+  }                                                           \
+                                                              \
+  void L##name::TearDownCache() {                             \
+    delete[] cache;                                           \
+  }
+
+LITHIUM_OPERAND_LIST(DEFINE_OPERAND_CACHE)
+#undef DEFINE_OPERAND_CACHE
+
+void LOperand::SetUpCaches() {
+#define LITHIUM_OPERAND_SETUP(name, type) L##name::SetUpCache();
+  LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_SETUP)
+#undef LITHIUM_OPERAND_SETUP
+}
+
+
+void LOperand::TearDownCaches() {
+#define LITHIUM_OPERAND_TEARDOWN(name, type) L##name::TearDownCache();
+  LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_TEARDOWN)
+#undef LITHIUM_OPERAND_TEARDOWN
+}
+
 
 bool LParallelMove::IsRedundant() const {
   for (int i = 0; i < move_operands_.length(); ++i) {
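
The point of moving this code: the old DEFINE_OPERAND_CACHE removed from lithium-allocator.cc above relied on a static initializer (name##_cache_initialized) that ran before main() and could never be torn down. The replacement allocates lazily and pairs setup with teardown. Written out, DEFINE_OPERAND_CACHE(ConstantOperand, CONSTANT_OPERAND) expands to:

    LConstantOperand* LConstantOperand::cache = NULL;

    void LConstantOperand::SetUpCache() {
      if (cache) return;  // Idempotent: safe to call more than once.
      cache = new LConstantOperand[kNumCachedOperands];
      for (int i = 0; i < kNumCachedOperands; i++) {
        cache[i].ConvertTo(CONSTANT_OPERAND, i);
      }
    }

    void LConstantOperand::TearDownCache() {
      delete[] cache;
    }

LOperand::SetUpCaches() and TearDownCaches() then walk the same LITHIUM_OPERAND_LIST, so a new operand kind added to the list picks up its cache plumbing automatically.
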
diff --git a/src/lithium.h b/src/lithium.h
index ec72695..2ccbf56 100644
--- a/src/lithium.h
+++ b/src/lithium.h
@@ -35,6 +35,14 @@
 namespace v8 {
 namespace internal {
 
+#define LITHIUM_OPERAND_LIST(V)         \
+  V(ConstantOperand, CONSTANT_OPERAND)  \
+  V(StackSlot,       STACK_SLOT)        \
+  V(DoubleStackSlot, DOUBLE_STACK_SLOT) \
+  V(Register,        REGISTER)          \
+  V(DoubleRegister,  DOUBLE_REGISTER)
+
+
 class LOperand: public ZoneObject {
  public:
   enum Kind {
@@ -52,14 +60,13 @@
 
   Kind kind() const { return KindField::decode(value_); }
   int index() const { return static_cast<int>(value_) >> kKindFieldWidth; }
-  bool IsConstantOperand() const { return kind() == CONSTANT_OPERAND; }
-  bool IsStackSlot() const { return kind() == STACK_SLOT; }
-  bool IsDoubleStackSlot() const { return kind() == DOUBLE_STACK_SLOT; }
-  bool IsRegister() const { return kind() == REGISTER; }
-  bool IsDoubleRegister() const { return kind() == DOUBLE_REGISTER; }
-  bool IsArgument() const { return kind() == ARGUMENT; }
-  bool IsUnallocated() const { return kind() == UNALLOCATED; }
-  bool IsIgnored() const { return kind() == INVALID; }
+#define LITHIUM_OPERAND_PREDICATE(name, type) \
+  bool Is##name() const { return kind() == type; }
+  LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_PREDICATE)
+  LITHIUM_OPERAND_PREDICATE(Argument, ARGUMENT)
+  LITHIUM_OPERAND_PREDICATE(Unallocated, UNALLOCATED)
+  LITHIUM_OPERAND_PREDICATE(Ignored, INVALID)
+#undef LITHIUM_OPERAND_PREDICATE
   bool Equals(LOperand* other) const { return value_ == other->value_; }
 
   void PrintTo(StringStream* stream);
@@ -69,6 +76,10 @@
     ASSERT(this->index() == index);
   }
 
+  // Calls SetUpCache()/TearDownCache() for each subclass.
+  static void SetUpCaches();
+  static void TearDownCaches();
+
  protected:
   static const int kKindFieldWidth = 3;
   class KindField : public BitField<Kind, 0, kKindFieldWidth> { };
@@ -261,10 +272,11 @@
   }
 
   static void SetUpCache();
+  static void TearDownCache();
 
  private:
   static const int kNumCachedOperands = 128;
-  static LConstantOperand cache[];
+  static LConstantOperand* cache;
 
   LConstantOperand() : LOperand() { }
   explicit LConstantOperand(int index) : LOperand(CONSTANT_OPERAND, index) { }
@@ -296,10 +308,11 @@
   }
 
   static void SetUpCache();
+  static void TearDownCache();
 
  private:
   static const int kNumCachedOperands = 128;
-  static LStackSlot cache[];
+  static LStackSlot* cache;
 
   LStackSlot() : LOperand() { }
   explicit LStackSlot(int index) : LOperand(STACK_SLOT, index) { }
@@ -320,10 +333,11 @@
   }
 
   static void SetUpCache();
+  static void TearDownCache();
 
  private:
   static const int kNumCachedOperands = 128;
-  static LDoubleStackSlot cache[];
+  static LDoubleStackSlot* cache;
 
   LDoubleStackSlot() : LOperand() { }
   explicit LDoubleStackSlot(int index) : LOperand(DOUBLE_STACK_SLOT, index) { }
@@ -344,10 +358,11 @@
   }
 
   static void SetUpCache();
+  static void TearDownCache();
 
  private:
   static const int kNumCachedOperands = 16;
-  static LRegister cache[];
+  static LRegister* cache;
 
   LRegister() : LOperand() { }
   explicit LRegister(int index) : LOperand(REGISTER, index) { }
@@ -368,10 +383,11 @@
   }
 
   static void SetUpCache();
+  static void TearDownCache();
 
  private:
   static const int kNumCachedOperands = 16;
-  static LDoubleRegister cache[];
+  static LDoubleRegister* cache;
 
   LDoubleRegister() : LOperand() { }
   explicit LDoubleRegister(int index) : LOperand(DOUBLE_REGISTER, index) { }
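
The predicate rewrite in the earlier lithium.h hunk reuses the same X-macro list; for instance, LITHIUM_OPERAND_PREDICATE(Register, REGISTER) expands to exactly the one-liner it replaces:

    bool IsRegister() const { return kind() == REGISTER; }

The three extra manual invocations cover Argument, Unallocated, and Ignored, which have predicates but no operand cache and so stay out of LITHIUM_OPERAND_LIST.
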
diff --git a/src/log.cc b/src/log.cc
index e023d88..21d64df 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -35,6 +35,7 @@
 #include "global-handles.h"
 #include "log.h"
 #include "macro-assembler.h"
+#include "platform.h"
 #include "runtime-profiler.h"
 #include "serialize.h"
 #include "string-stream.h"
@@ -1728,13 +1729,14 @@
   }
 }
 
+// Protects the state below.
+static LazyMutex active_samplers_mutex = LAZY_MUTEX_INITIALIZER;
 
-Mutex* SamplerRegistry::mutex_ = OS::CreateMutex();
 List<Sampler*>* SamplerRegistry::active_samplers_ = NULL;
 
 
 bool SamplerRegistry::IterateActiveSamplers(VisitSampler func, void* param) {
-  ScopedLock lock(mutex_);
+  ScopedLock lock(active_samplers_mutex.Pointer());
   for (int i = 0;
        ActiveSamplersExist() && i < active_samplers_->length();
        ++i) {
@@ -1761,7 +1763,7 @@
 
 void SamplerRegistry::AddActiveSampler(Sampler* sampler) {
   ASSERT(sampler->IsActive());
-  ScopedLock lock(mutex_);
+  ScopedLock lock(active_samplers_mutex.Pointer());
   if (active_samplers_ == NULL) {
     active_samplers_ = new List<Sampler*>;
   } else {
@@ -1773,7 +1775,7 @@
 
 void SamplerRegistry::RemoveActiveSampler(Sampler* sampler) {
   ASSERT(sampler->IsActive());
-  ScopedLock lock(mutex_);
+  ScopedLock lock(active_samplers_mutex.Pointer());
   ASSERT(active_samplers_ != NULL);
   bool removed = active_samplers_->RemoveElement(sampler);
   ASSERT(removed);
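
This file and the platform-*.cc ports below all make the same mechanical substitution. Pulled out of diff form, the pattern is (a sketch; the guarded state is elided):

    // Before: a static initializer called OS::CreateMutex() before main().
    //   static Mutex* mutex_ = OS::CreateMutex();
    //   ...
    //   ScopedLock lock(mutex_);

    // After: plain POD initialization; the mutex is created on first use.
    static LazyMutex active_samplers_mutex = LAZY_MUTEX_INITIALIZER;

    void WithRegistryLock() {
      ScopedLock lock(active_samplers_mutex.Pointer());
      // ... touch the sampler registry under the lock ...
    }
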
diff --git a/src/log.h b/src/log.h
index e54f041..1297387 100644
--- a/src/log.h
+++ b/src/log.h
@@ -454,7 +454,6 @@
     return active_samplers_ != NULL && !active_samplers_->is_empty();
   }
 
-  static Mutex* mutex_;  // Protects the state below.
   static List<Sampler*>* active_samplers_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(SamplerRegistry);
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index dde172d..b4f488b 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -1406,6 +1406,10 @@
 
     if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
 
+    if (shared->ic_age() != heap->global_ic_age()) {
+      shared->ResetForNewContext(heap->global_ic_age());
+    }
+
     if (!known_flush_code_candidate) {
       known_flush_code_candidate = IsFlushable(heap, shared);
       if (known_flush_code_candidate) {
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
index 611fbaa..51c2e46 100644
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -119,7 +119,7 @@
   const int kInstrSize = Assembler::kInstrSize;
   // This structure comes from FullCodeGenerator::EmitStackCheck.
   // The call of the stack guard check has the following form:
-  // sltu at, sp, t0
+  // sltu at, sp, t0 / slt at, a3, zero_reg (in case of count-based interrupts)
   // beq at, zero_reg, ok
   // lui t9, <stack guard address> upper
   // ori t9, <stack guard address> lower
@@ -167,7 +167,11 @@
 
   // Restore the sltu instruction so beq can be taken again.
   CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
-  patcher.masm()->sltu(at, sp, t0);
+  if (FLAG_count_based_interrupts) {
+    patcher.masm()->slt(at, a3, zero_reg);
+  } else {
+    patcher.masm()->sltu(at, sp, t0);
+  }
 
   // Replace the on-stack replacement address in the load-immediate (lui/ori
   // pair) with the entry address of the normal stack-check code.
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 3df2fc6..64cdc8e 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -42,6 +42,7 @@
 #include "compiler.h"
 #include "debug.h"
 #include "full-codegen.h"
+#include "isolate-inl.h"
 #include "parser.h"
 #include "scopes.h"
 #include "stub-cache.h"
@@ -119,11 +120,6 @@
 };
 
 
-int FullCodeGenerator::self_optimization_header_size() {
-  return 10 * Instruction::kInstrSize;
-}
-
-
 // Generate code for a JS function.  On entry to the function the receiver
 // and arguments have been pushed on the stack left to right.  The actual
 // argument count matches the formal parameter count expected by the
@@ -142,32 +138,11 @@
   CompilationInfo* info = info_;
   handler_table_ =
       isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+  profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
+      Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
   SetFunctionPosition(function());
   Comment cmnt(masm_, "[ function compiled by full code generator");
 
-  // We can optionally optimize based on counters rather than statistical
-  // sampling.
-  if (info->ShouldSelfOptimize()) {
-    if (FLAG_trace_opt_verbose) {
-      PrintF("[adding self-optimization header to %s]\n",
-             *info->function()->debug_name()->ToCString());
-    }
-    has_self_optimization_header_ = true;
-    MaybeObject* maybe_cell = isolate()->heap()->AllocateJSGlobalPropertyCell(
-        Smi::FromInt(Compiler::kCallsUntilPrimitiveOpt));
-    JSGlobalPropertyCell* cell;
-    if (maybe_cell->To(&cell)) {
-      __ li(a2, Handle<JSGlobalPropertyCell>(cell));
-      __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
-      __ Subu(a3, a3, Operand(Smi::FromInt(1)));
-      __ sw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
-      Handle<Code> compile_stub(
-          isolate()->builtins()->builtin(Builtins::kLazyRecompile));
-      __ Jump(compile_stub, RelocInfo::CODE_TARGET, eq, a3, Operand(zero_reg));
-      ASSERT_EQ(masm_->pc_offset(), self_optimization_header_size());
-    }
-  }
-
 #ifdef DEBUG
   if (strlen(FLAG_stop_at) > 0 &&
       info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -341,6 +316,34 @@
 }
 
 
+void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
+  __ li(a2, Operand(profiling_counter_));
+  __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+  __ Subu(a3, a3, Operand(Smi::FromInt(delta)));
+  __ sw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterReset() {
+  int reset_value = FLAG_interrupt_budget;
+  if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
+    // Self-optimization is a one-off thing: if it fails, don't try again.
+    reset_value = Smi::kMaxValue;
+  }
+  if (isolate()->IsDebuggerActive()) {
+    // Detect debug break requests as soon as possible.
+    reset_value = 10;
+  }
+  __ li(a2, Operand(profiling_counter_));
+  __ li(a3, Operand(Smi::FromInt(reset_value)));
+  __ sw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+}
+
+
+static const int kMaxBackEdgeWeight = 127;
+static const int kBackEdgeDistanceDivisor = 142;
+
+
 void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
                                        Label* back_edge_target) {
   // The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need
@@ -351,16 +354,35 @@
   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
   Comment cmnt(masm_, "[ Stack check");
   Label ok;
-  __ LoadRoot(t0, Heap::kStackLimitRootIndex);
-  __ sltu(at, sp, t0);
-  __ beq(at, zero_reg, &ok);
-  // CallStub will emit a li t9, ... first, so it is safe to use the delay slot.
-  StackCheckStub stub;
-  __ CallStub(&stub);
+  if (FLAG_count_based_interrupts) {
+    int weight = 1;
+    if (FLAG_weighted_back_edges) {
+      ASSERT(back_edge_target->is_bound());
+      int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+      weight = Min(kMaxBackEdgeWeight,
+                   Max(1, distance / kBackEdgeDistanceDivisor));
+    }
+    EmitProfilingCounterDecrement(weight);
+    __ slt(at, a3, zero_reg);
+    __ beq(at, zero_reg, &ok);
+    // CallStub will emit a li t9 first, so it is safe to use the delay slot.
+    InterruptStub stub;
+    __ CallStub(&stub);
+  } else {
+    __ LoadRoot(t0, Heap::kStackLimitRootIndex);
+    __ sltu(at, sp, t0);
+    __ beq(at, zero_reg, &ok);
+    // CallStub will emit a li t9 first, so it is safe to use the delay slot.
+    StackCheckStub stub;
+    __ CallStub(&stub);
+  }
   // Record a mapping of this PC offset to the OSR id.  This is used to find
   // the AST id from the unoptimized code in order to use it as a key into
   // the deoptimization input data found in the optimized code.
   RecordStackCheck(stmt->OsrEntryId());
+  if (FLAG_count_based_interrupts) {
+    EmitProfilingCounterReset();
+  }
 
   __ bind(&ok);
   PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
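
The weight computation above, restated as a standalone function with concrete numbers (kBackEdgeDistanceDivisor is 142 and kMaxBackEdgeWeight is 127, per the constants defined just before EmitStackCheck):

    static int BackEdgeWeight(int distance) {
      static const int kMaxBackEdgeWeight = 127;
      static const int kBackEdgeDistanceDivisor = 142;
      int weight = distance / kBackEdgeDistanceDivisor;
      if (weight < 1) weight = 1;
      if (weight > kMaxBackEdgeWeight) weight = kMaxBackEdgeWeight;
      return weight;  // E.g. a 1420-byte loop body costs 10 per iteration.
    }

So small loops drain the FLAG_interrupt_budget slowly and large loop bodies drain it fast; once the counter (held in a3 here) goes negative, InterruptStub runs and EmitProfilingCounterReset() refills the budget.
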
@@ -383,6 +405,32 @@
       __ push(v0);
       __ CallRuntime(Runtime::kTraceExit, 1);
     }
+    if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
+      // Pretend that the exit is a backwards jump to the entry.
+      int weight = 1;
+      if (info_->ShouldSelfOptimize()) {
+        weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+      } else if (FLAG_weighted_back_edges) {
+        int distance = masm_->pc_offset();
+        weight = Min(kMaxBackEdgeWeight,
+                     Max(1, distance / kBackEdgeDistanceDivisor));
+      }
+      EmitProfilingCounterDecrement(weight);
+      Label ok;
+      __ Branch(&ok, ge, a3, Operand(zero_reg));
+      __ push(v0);
+      if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
+        __ lw(a2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+        __ push(a2);
+        __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
+      } else {
+        InterruptStub stub;
+        __ CallStub(&stub);
+      }
+      __ pop(v0);
+      EmitProfilingCounterReset();
+      __ bind(&ok);
+    }
 
 #ifdef DEBUG
     // Add a label for checking the size of the code used for returning.
@@ -902,7 +950,7 @@
     // Record position before stub call for type feedback.
     SetSourcePosition(clause->position());
     Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
-    __ Call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+    CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
     patch_site.EmitPatchInfo();
 
     __ Branch(&next_test, ne, v0, Operand(zero_reg));
@@ -1195,7 +1243,7 @@
       ? RelocInfo::CODE_TARGET
       : RelocInfo::CODE_TARGET_CONTEXT;
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  __ Call(ic, mode);
+  CallIC(ic, mode);
 }
 
 
@@ -1279,7 +1327,7 @@
       __ lw(a0, GlobalObjectOperand());
       __ li(a2, Operand(var->name()));
       Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-      __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+      CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
       context()->Plug(v0);
       break;
     }
@@ -1493,7 +1541,7 @@
             Handle<Code> ic = is_classic_mode()
                 ? isolate()->builtins()->StoreIC_Initialize()
                 : isolate()->builtins()->StoreIC_Initialize_Strict();
-            __ Call(ic, RelocInfo::CODE_TARGET, key->id());
+            CallIC(ic, RelocInfo::CODE_TARGET, key->id());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
           } else {
             VisitForEffect(value);
@@ -1766,7 +1814,7 @@
   __ li(a2, Operand(key->handle()));
   // Call load IC. It has arguments receiver and property name a0 and a2.
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  __ Call(ic, RelocInfo::CODE_TARGET, prop->id());
+  CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
 }
 
 
@@ -1775,7 +1823,7 @@
   __ mov(a0, result_register());
   // Call keyed load IC. It has arguments key and receiver in a0 and a1.
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  __ Call(ic, RelocInfo::CODE_TARGET, prop->id());
+  CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
 }
 
 
@@ -1803,7 +1851,7 @@
 
   __ bind(&stub_call);
   BinaryOpStub stub(op, mode);
-  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
   patch_site.EmitPatchInfo();
   __ jmp(&done);
 
@@ -1886,7 +1934,7 @@
   __ pop(a1);
   BinaryOpStub stub(op, mode);
   JumpPatchSite patch_site(masm_);    // unbound, signals no inlined smi code.
-  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
   patch_site.EmitPatchInfo();
   context()->Plug(v0);
 }
@@ -1927,7 +1975,7 @@
       Handle<Code> ic = is_classic_mode()
           ? isolate()->builtins()->StoreIC_Initialize()
           : isolate()->builtins()->StoreIC_Initialize_Strict();
-      __ Call(ic);
+      CallIC(ic);
       break;
     }
     case KEYED_PROPERTY: {
@@ -1940,7 +1988,7 @@
       Handle<Code> ic = is_classic_mode()
         ? isolate()->builtins()->KeyedStoreIC_Initialize()
         : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
-      __ Call(ic);
+      CallIC(ic);
       break;
     }
   }
@@ -1958,7 +2006,7 @@
     Handle<Code> ic = is_classic_mode()
         ? isolate()->builtins()->StoreIC_Initialize()
         : isolate()->builtins()->StoreIC_Initialize_Strict();
-    __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+    CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
 
   } else if (op == Token::INIT_CONST) {
     // Const initializers need a write barrier.
@@ -2077,7 +2125,7 @@
   Handle<Code> ic = is_classic_mode()
         ? isolate()->builtins()->StoreIC_Initialize()
         : isolate()->builtins()->StoreIC_Initialize_Strict();
-  __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+  CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -2129,7 +2177,7 @@
   Handle<Code> ic = is_classic_mode()
       ? isolate()->builtins()->KeyedStoreIC_Initialize()
       : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
-  __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+  CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -2164,6 +2212,14 @@
 }
 
 
+void FullCodeGenerator::CallIC(Handle<Code> code,
+                               RelocInfo::Mode rmode,
+                               unsigned ast_id) {
+  ic_total_count_++;
+  __ Call(code, rmode, ast_id);
+}
+
+
 void FullCodeGenerator::EmitCallWithIC(Call* expr,
                                        Handle<Object> name,
                                        RelocInfo::Mode mode) {
@@ -2181,7 +2237,7 @@
   // Call the IC initialization code.
   Handle<Code> ic =
       isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
-  __ Call(ic, mode, expr->id());
+  CallIC(ic, mode, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2214,7 +2270,7 @@
   Handle<Code> ic =
       isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
   __ lw(a2, MemOperand(sp, (arg_count + 1) * kPointerSize));  // Key.
-  __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+  CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3828,7 +3884,7 @@
     RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
     Handle<Code> ic =
         isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
-    __ Call(ic, mode, expr->id());
+    CallIC(ic, mode, expr->id());
     // Restore context register.
     __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   } else {
@@ -3984,7 +4040,7 @@
   VisitForAccumulatorValue(expr->expression());
   SetSourcePosition(expr->position());
   __ mov(a0, result_register());
-  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
   context()->Plug(v0);
 }
 
@@ -4095,7 +4151,7 @@
   SetSourcePosition(expr->position());
 
   BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
-  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
   patch_site.EmitPatchInfo();
   __ bind(&done);
 
@@ -4128,7 +4184,7 @@
       Handle<Code> ic = is_classic_mode()
           ? isolate()->builtins()->StoreIC_Initialize()
           : isolate()->builtins()->StoreIC_Initialize_Strict();
-      __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+      CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -4146,7 +4202,7 @@
       Handle<Code> ic = is_classic_mode()
           ? isolate()->builtins()->KeyedStoreIC_Initialize()
           : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
-      __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+      CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -4172,7 +4228,7 @@
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
-    __ Call(ic);
+    CallIC(ic);
     PrepareForBailout(expr, TOS_REG);
     context()->Plug(v0);
   } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
@@ -4350,7 +4406,7 @@
       // Record position and call the compare IC.
       SetSourcePosition(expr->position());
       Handle<Code> ic = CompareIC::GetUninitialized(op);
-      __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+      CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
       patch_site.EmitPatchInfo();
       PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
       Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
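
CallIC, defined in an earlier hunk of this file, exists so full codegen can count IC call sites: ic_total_count_ presumably ends up in the function's TypeFeedbackInfo next to the renamed ic_with_type_info_count (see the objects-inl.h hunk below). Illustrative only: how the pair could yield a type-info percentage; the consumer of this ratio is outside this excerpt, and the names here are assumptions, not part of the change:

    static int TypeInfoPercentage(int ic_with_type_info_count,
                                  int ic_total_count) {
      if (ic_total_count == 0) return 100;  // No IC sites: nothing lacks info.
      return 100 * ic_with_type_info_count / ic_total_count;
    }
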
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 94e8979..2af9d6f 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -4237,14 +4237,21 @@
 }
 
 
-void LCodeGen::DoCheckMap(LCheckMap* instr) {
+void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
   Register scratch = scratch0();
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister());
   Register reg = ToRegister(input);
-  Handle<Map> map = instr->hydrogen()->map();
-  DoCheckMapCommon(reg, scratch, map, instr->hydrogen()->mode(),
-                   instr->environment());
+  Label success;
+  SmallMapList* map_set = instr->hydrogen()->map_set();
+  for (int i = 0; i < map_set->length() - 1; i++) {
+    Handle<Map> map = map_set->at(i);
+    __ CompareMapAndBranch(
+        reg, scratch, map, &success, eq, &success, REQUIRE_EXACT_MAP);
+  }
+  Handle<Map> map = map_set->last();
+  DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment());
+  __ bind(&success);
 }
 
 
@@ -4505,34 +4512,51 @@
   }
 
 
-  // Copy elements backing store header.
-  ASSERT(!has_elements || elements->IsFixedArray());
   if (has_elements) {
+    // Copy elements backing store header.
     __ LoadHeapObject(source, elements);
     for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
       __ lw(a2, FieldMemOperand(source, i));
       __ sw(a2, FieldMemOperand(result, elements_offset + i));
     }
-  }
 
-  // Copy elements backing store content.
-  ASSERT(!has_elements || elements->IsFixedArray());
-  int elements_length = has_elements ? elements->length() : 0;
-  for (int i = 0; i < elements_length; i++) {
-    int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
-    Handle<Object> value = JSObject::GetElement(object, i);
-    if (value->IsJSObject()) {
-      Handle<JSObject> value_object = Handle<JSObject>::cast(value);
-      __ Addu(a2, result, Operand(*offset));
-      __ sw(a2, FieldMemOperand(result, total_offset));
-      __ LoadHeapObject(source, value_object);
-      EmitDeepCopy(value_object, result, source, offset);
-    } else if (value->IsHeapObject()) {
-      __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
-      __ sw(a2, FieldMemOperand(result, total_offset));
+    // Copy elements backing store content.
+    int elements_length = has_elements ? elements->length() : 0;
+    if (elements->IsFixedDoubleArray()) {
+      Handle<FixedDoubleArray> double_array =
+          Handle<FixedDoubleArray>::cast(elements);
+      for (int i = 0; i < elements_length; i++) {
+        int64_t value = double_array->get_representation(i);
+        // We only support little endian mode...
+        int32_t value_low = value & 0xFFFFFFFF;
+        int32_t value_high = value >> 32;
+        int total_offset =
+            elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
+        __ li(a2, Operand(value_low));
+        __ sw(a2, FieldMemOperand(result, total_offset));
+        __ li(a2, Operand(value_high));
+        __ sw(a2, FieldMemOperand(result, total_offset + 4));
+      }
+    } else if (elements->IsFixedArray()) {
+      for (int i = 0; i < elements_length; i++) {
+        int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
+        Handle<Object> value = JSObject::GetElement(object, i);
+        if (value->IsJSObject()) {
+          Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+          __ Addu(a2, result, Operand(*offset));
+          __ sw(a2, FieldMemOperand(result, total_offset));
+          __ LoadHeapObject(source, value_object);
+          EmitDeepCopy(value_object, result, source, offset);
+        } else if (value->IsHeapObject()) {
+          __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
+          __ sw(a2, FieldMemOperand(result, total_offset));
+        } else {
+          __ li(a2, Operand(value));
+          __ sw(a2, FieldMemOperand(result, total_offset));
+        }
+      }
     } else {
-      __ li(a2, Operand(value));
-      __ sw(a2, FieldMemOperand(result, total_offset));
+      UNREACHABLE();
     }
   }
 }
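
The FixedDoubleArray branch above stores each element as two 32-bit words, and the "We only support little endian mode..." comment is load-bearing: the low word must land at the element's offset and the high word four bytes later. Worked out for the literal 1.0:

    // IEEE-754 bit pattern of 1.0 is 0x3FF0000000000000.
    int64_t value = 0x3FF0000000000000LL;
    int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);  // 0x00000000
    int32_t value_high = static_cast<int32_t>(value >> 32);        // 0x3FF00000
    // On little-endian MIPS, value_low is stored at total_offset and
    // value_high at total_offset + 4, reproducing the double in memory.
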
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index 1e0c216..4585a4e 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -1755,9 +1755,9 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
+LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  LInstruction* result = new(zone()) LCheckMap(value);
+  LInstruction* result = new(zone()) LCheckMaps(value);
   return AssignEnvironment(result);
 }
 
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index 5a7bf4d..952df99 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -71,7 +71,7 @@
   V(CallStub)                                   \
   V(CheckFunction)                              \
   V(CheckInstanceType)                          \
-  V(CheckMap)                                   \
+  V(CheckMaps)                                  \
   V(CheckNonSmi)                                \
   V(CheckPrototypeMaps)                         \
   V(CheckSmi)                                   \
@@ -1869,14 +1869,14 @@
 };
 
 
-class LCheckMap: public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
  public:
-  explicit LCheckMap(LOperand* value) {
+  explicit LCheckMaps(LOperand* value) {
     inputs_[0] = value;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
-  DECLARE_HYDROGEN_ACCESSOR(CheckMap)
+  DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
+  DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
 };
 
 
diff --git a/src/mips/regexp-macro-assembler-mips.h b/src/mips/regexp-macro-assembler-mips.h
index d42d4cf..d167f62 100644
--- a/src/mips/regexp-macro-assembler-mips.h
+++ b/src/mips/regexp-macro-assembler-mips.h
@@ -81,6 +81,14 @@
                                               uc16 minus,
                                               uc16 mask,
                                               Label* on_not_equal);
+  virtual void CheckCharacterInRange(uc16 from,
+                                     uc16 to,
+                                     Label* on_in_range);
+  virtual void CheckCharacterNotInRange(uc16 from,
+                                        uc16 to,
+                                        Label* on_not_in_range);
+  virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
+
   // Checks whether the given offset from the current position is before
   // the end of the string.
   virtual void CheckPosition(int cp_offset, Label* on_outside_input);
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 78578cc..bf86a82 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -3060,21 +3060,6 @@
 }
 
 
-bool Code::has_self_optimization_header() {
-  ASSERT(kind() == FUNCTION);
-  byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
-  return FullCodeFlagsHasSelfOptimizationHeader::decode(flags);
-}
-
-
-void Code::set_self_optimization_header(bool value) {
-  ASSERT(kind() == FUNCTION);
-  byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
-  flags = FullCodeFlagsHasSelfOptimizationHeader::update(flags, value);
-  WRITE_BYTE_FIELD(this, kFullCodeFlags, flags);
-}
-
-
 int Code::allow_osr_at_loop_nesting_level() {
   ASSERT(kind() == FUNCTION);
   return READ_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset);
@@ -3088,6 +3073,19 @@
 }
 
 
+int Code::profiler_ticks() {
+  ASSERT(kind() == FUNCTION);
+  return READ_BYTE_FIELD(this, kProfilerTicksOffset);
+}
+
+
+void Code::set_profiler_ticks(int ticks) {
+  ASSERT(kind() == FUNCTION);
+  ASSERT(ticks < 256);
+  WRITE_BYTE_FIELD(this, kProfilerTicksOffset, ticks);
+}
+
+
 unsigned Code::stack_slots() {
   ASSERT(kind() == OPTIMIZED_FUNCTION);
   return READ_UINT32_FIELD(this, kStackSlotsOffset);
@@ -3507,8 +3505,8 @@
 ACCESSORS(SharedFunctionInfo, inferred_name, String, kInferredNameOffset)
 ACCESSORS(SharedFunctionInfo, this_property_assignments, Object,
           kThisPropertyAssignmentsOffset)
+SMI_ACCESSORS(SharedFunctionInfo, ic_age, kICAgeOffset)
 
-SMI_ACCESSORS(SharedFunctionInfo, profiler_ticks, kProfilerTicksOffset)
 
 BOOL_ACCESSORS(FunctionTemplateInfo, flag, hidden_prototype,
                kHiddenPrototypeBit)
@@ -4814,7 +4812,7 @@
 
 
 SMI_ACCESSORS(TypeFeedbackInfo, ic_total_count, kIcTotalCountOffset)
-SMI_ACCESSORS(TypeFeedbackInfo, ic_with_typeinfo_count,
+SMI_ACCESSORS(TypeFeedbackInfo, ic_with_type_info_count,
               kIcWithTypeinfoCountOffset)
 ACCESSORS(TypeFeedbackInfo, type_feedback_cells, TypeFeedbackCells,
           kTypeFeedbackCellsOffset)
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 38e6138..2353a95 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -559,8 +559,8 @@
 
 void TypeFeedbackInfo::TypeFeedbackInfoPrint(FILE* out) {
   HeapObject::PrintHeader(out, "TypeFeedbackInfo");
-  PrintF(out, "\n - ic_total_count: %d, ic_with_typeinfo_count: %d",
-         ic_total_count(), ic_with_typeinfo_count());
+  PrintF(out, "\n - ic_total_count: %d, ic_with_type_info_count: %d",
+         ic_total_count(), ic_with_type_info_count());
   PrintF(out, "\n - type_feedback_cells: ");
   type_feedback_cells()->FixedArrayPrint(out);
 }
diff --git a/src/objects.cc b/src/objects.cc
index 64d85a0..29ccacf 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -7869,6 +7869,19 @@
 }
 
 
+void SharedFunctionInfo::ResetForNewContext(int new_ic_age) {
+  code()->ClearInlineCaches();
+  code()->set_profiler_ticks(0);
+  set_ic_age(new_ic_age);
+  if (optimization_disabled() && opt_count() >= Compiler::kDefaultMaxOptCount) {
+    // Re-enable optimizations if they were disabled due to opt_count limit.
+    set_optimization_disabled(false);
+    code()->set_optimizable(true);
+  }
+  set_opt_count(0);
+}
+
+
 static void GetMinInobjectSlack(Map* map, void* data) {
   int slack = map->unused_property_fields();
   if (*reinterpret_cast<int*>(data) > slack) {
@@ -8116,6 +8129,21 @@
 }
 
 
+void Code::ClearInlineCaches() {
+  int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+             RelocInfo::ModeMask(RelocInfo::CONSTRUCT_CALL) |
+             RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID) |
+             RelocInfo::ModeMask(RelocInfo::CODE_TARGET_CONTEXT);
+  for (RelocIterator it(this, mask); !it.done(); it.next()) {
+    RelocInfo* info = it.rinfo();
+    Code* target(Code::GetCodeFromTargetAddress(info->target_address()));
+    if (target->is_inline_cache_stub()) {
+      IC::Clear(info->pc());
+    }
+  }
+}
+
+
 #ifdef ENABLE_DISASSEMBLER
 
 void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
diff --git a/src/objects.h b/src/objects.h
index a9cb8e0..ca6acf5 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -4243,11 +4243,6 @@
   inline bool is_compiled_optimizable();
   inline void set_compiled_optimizable(bool value);
 
-  // [has_self_optimization_header]: For FUNCTION kind, tells if it has
-  // a self-optimization header.
-  inline bool has_self_optimization_header();
-  inline void set_self_optimization_header(bool value);
-
   // [allow_osr_at_loop_nesting_level]: For FUNCTION kind, tells for
   // how long the function has been marked for OSR and therefore which
   // level of loop nesting we are willing to do on-stack replacement
@@ -4255,6 +4250,11 @@
   inline void set_allow_osr_at_loop_nesting_level(int level);
   inline int allow_osr_at_loop_nesting_level();
 
+  // [profiler_ticks]: For FUNCTION kind, tells for how many profiler ticks
+  // the code object was seen on the stack with no IC patching going on.
+  inline int profiler_ticks();
+  inline void set_profiler_ticks(int ticks);
+
   // [stack_slots]: For kind OPTIMIZED_FUNCTION, the number of stack slots
   // reserved in the code prologue.
   inline unsigned stack_slots();
@@ -4423,6 +4423,7 @@
 #ifdef DEBUG
   void CodeVerify();
 #endif
+  void ClearInlineCaches();
 
   // Max loop nesting marker used to postpone OSR. We don't take loop
   // nesting that is deeper than 5 levels into account.
@@ -4468,11 +4469,11 @@
       public BitField<bool, 0, 1> {};  // NOLINT
   class FullCodeFlagsHasDebugBreakSlotsField: public BitField<bool, 1, 1> {};
   class FullCodeFlagsIsCompiledOptimizable: public BitField<bool, 2, 1> {};
-  class FullCodeFlagsHasSelfOptimizationHeader: public BitField<bool, 3, 1> {};
 
   static const int kBinaryOpReturnTypeOffset = kBinaryOpTypeOffset + 1;
 
   static const int kAllowOSRAtLoopNestingLevelOffset = kFullCodeFlags + 1;
+  static const int kProfilerTicksOffset = kAllowOSRAtLoopNestingLevelOffset + 1;
 
   static const int kSafepointTableOffsetOffset = kStackSlotsOffset + kIntSize;
   static const int kStackCheckTableOffsetOffset = kStackSlotsOffset + kIntSize;
@@ -5323,16 +5324,18 @@
   inline int compiler_hints();
   inline void set_compiler_hints(int value);
 
+  inline int ast_node_count();
+  inline void set_ast_node_count(int count);
+
   // A counter used to determine when to stress the deoptimizer with a
   // deopt.
   inline int deopt_counter();
   inline void set_deopt_counter(int counter);
 
-  inline int profiler_ticks();
-  inline void set_profiler_ticks(int ticks);
-
-  inline int ast_node_count();
-  inline void set_ast_node_count(int count);
+  // Inline cache age is used to infer whether the function survived a context
+  // disposal or not. In the former case we reset the opt_count.
+  inline int ic_age();
+  inline void set_ic_age(int age);
 
   // Add information on assignments of the form this.x = ...;
   void SetThisPropertyAssignmentsInfo(
@@ -5478,6 +5481,8 @@
   void SharedFunctionInfoVerify();
 #endif
 
+  void ResetForNewContext(int new_ic_age);
+
   // Helpers to compile the shared code.  Returns true on success, false on
   // failure (e.g., stack overflow during compilation).
   static bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
@@ -5508,12 +5513,13 @@
       kInferredNameOffset + kPointerSize;
   static const int kThisPropertyAssignmentsOffset =
       kInitialMapOffset + kPointerSize;
-  static const int kProfilerTicksOffset =
-      kThisPropertyAssignmentsOffset + kPointerSize;
+  // ic_age is a Smi field. It could be grouped with another Smi field into a
+  // PSEUDO_SMI_ACCESSORS pair (on x64), if one becomes available.
+  static const int kICAgeOffset = kThisPropertyAssignmentsOffset + kPointerSize;
 #if V8_HOST_ARCH_32_BIT
   // Smi fields.
   static const int kLengthOffset =
-      kProfilerTicksOffset + kPointerSize;
+      kICAgeOffset + kPointerSize;
   static const int kFormalParameterCountOffset = kLengthOffset + kPointerSize;
   static const int kExpectedNofPropertiesOffset =
       kFormalParameterCountOffset + kPointerSize;
@@ -5532,8 +5538,9 @@
   static const int kOptCountOffset =
       kThisPropertyAssignmentsCountOffset + kPointerSize;
   static const int kAstNodeCountOffset = kOptCountOffset + kPointerSize;
-  static const int kDeoptCounterOffset =
-      kAstNodeCountOffset + kPointerSize;
+  static const int kDeoptCounterOffset = kAstNodeCountOffset + kPointerSize;
+
+
   // Total size.
   static const int kSize = kDeoptCounterOffset + kPointerSize;
 #else
@@ -5547,7 +5554,7 @@
   // word is not set and thus this word cannot be treated as pointer
   // to HeapObject during old space traversal.
   static const int kLengthOffset =
-      kProfilerTicksOffset + kPointerSize;
+      kICAgeOffset + kPointerSize;
   static const int kFormalParameterCountOffset =
       kLengthOffset + kIntSize;
 
@@ -6562,8 +6569,8 @@
   inline int ic_total_count();
   inline void set_ic_total_count(int count);
 
-  inline int ic_with_typeinfo_count();
-  inline void set_ic_with_typeinfo_count(int count);
+  inline int ic_with_type_info_count();
+  inline void set_ic_with_type_info_count(int count);
 
   DECL_ACCESSORS(type_feedback_cells, TypeFeedbackCells)
 
diff --git a/src/once.cc b/src/once.cc
new file mode 100644
index 0000000..37fe369
--- /dev/null
+++ b/src/once.cc
@@ -0,0 +1,77 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "once.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <sched.h>
+#endif
+
+#include "atomicops.h"
+#include "checks.h"
+
+namespace v8 {
+namespace internal {
+
+void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg) {
+  AtomicWord state = Acquire_Load(once);
+  // Fast path. The provided function was already executed.
+  if (state == ONCE_STATE_DONE) {
+    return;
+  }
+
+  // The function execution did not complete yet. The once object can be in one
+  // of the two following states:
+  //   - UNINITIALIZED: We are the first thread calling this function.
+  //   - EXECUTING_FUNCTION: Another thread is already executing the function.
+  //
+  // First, try to change the state from UNINITIALIZED to EXECUTING_FUNCTION
+  // atomically.
+  state = Acquire_CompareAndSwap(
+      once, ONCE_STATE_UNINITIALIZED, ONCE_STATE_EXECUTING_FUNCTION);
+  if (state == ONCE_STATE_UNINITIALIZED) {
+    // We are the first thread to call this function, so we have to call the
+    // function.
+    init_func(arg);
+    Release_Store(once, ONCE_STATE_DONE);
+  } else {
+    // Another thread has already started executing the function. We need to
+    // wait until it completes the initialization.
+    while (state == ONCE_STATE_EXECUTING_FUNCTION) {
+#ifdef _WIN32
+      ::Sleep(0);
+#else
+      sched_yield();
+#endif
+      state = Acquire_Load(once);
+    }
+  }
+}
+
+} }  // namespace v8::internal
diff --git a/src/once.h b/src/once.h
new file mode 100644
index 0000000..a44b8fa
--- /dev/null
+++ b/src/once.h
@@ -0,0 +1,123 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// emulates google3/base/once.h
+//
+// This header is intended to be included only by v8's internal code. Users
+// should not use this directly.
+//
+// This is basically a portable version of pthread_once().
+//
+// This header declares:
+// * A type called OnceType.
+// * A macro V8_DECLARE_ONCE() which declares a (global) variable of type
+//   OnceType.
+// * A function CallOnce(OnceType* once, void (*init_func)()).
+//   This function, when invoked multiple times given the same OnceType object,
+//   will invoke init_func on the first call only, and will make sure none of
+//   the calls return before that first call to init_func has finished.
+//
+// Additionally, the following features are supported:
+// * A macro V8_ONCE_INIT which is expanded into the expression used to
+//   initialize a OnceType. This is only useful when clients embed a OnceType
+//   into a structure of their own and want to initialize it statically.
+// * The user can provide a pointer parameter which CallOnce() forwards to
+//   the user-provided function when it is called. Usage example:
+//     CallOnce(&my_once, &MyFunctionExpectingIntPtrArgument, &my_int);
+// * This implementation guarantees that OnceType is a POD (i.e. no static
+//   initializer generated).
+//
+// This implements a way to perform lazy initialization.  It's more efficient
+// than using mutexes as no lock is needed if initialization has already
+// happened.
+//
+// Example usage:
+//   void Init();
+//   V8_DECLARE_ONCE(once_init);
+//
+//   // Calls Init() exactly once.
+//   void InitOnce() {
+//     CallOnce(&once_init, &Init);
+//   }
+//
+// Note that if CallOnce() is called before main() has begun, it must
+// only be called by the thread that will eventually call main() -- that is,
+// the thread that performs dynamic initialization.  In general this is a safe
+// assumption since people don't usually construct threads before main() starts,
+// but it is technically not guaranteed.  Unfortunately, Win32 provides no way
+// whatsoever to statically-initialize its synchronization primitives, so our
+// only choice is to assume that dynamic initialization is single-threaded.
+
+#ifndef V8_ONCE_H_
+#define V8_ONCE_H_
+
+#include "atomicops.h"
+
+namespace v8 {
+namespace internal {
+
+typedef AtomicWord OnceType;
+
+#define V8_ONCE_INIT 0
+
+#define V8_DECLARE_ONCE(NAME) ::v8::internal::OnceType NAME
+
+enum {
+  ONCE_STATE_UNINITIALIZED = 0,
+  ONCE_STATE_EXECUTING_FUNCTION = 1,
+  ONCE_STATE_DONE = 2
+};
+
+typedef void (*NoArgFunction)();
+typedef void (*PointerArgFunction)(void* arg);
+
+template <typename T>
+struct OneArgFunction {
+  typedef void (*type)(T);
+};
+
+void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg);
+
+inline void CallOnce(OnceType* once, NoArgFunction init_func) {
+  if (Acquire_Load(once) != ONCE_STATE_DONE) {
+    CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func), NULL);
+  }
+}
+
+
+template <typename Arg>
+inline void CallOnce(OnceType* once,
+    typename OneArgFunction<Arg*>::type init_func, Arg* arg) {
+  if (Acquire_Load(once) != ONCE_STATE_DONE) {
+    CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func),
+        static_cast<void*>(arg));
+  }
+}
+
+} }  // namespace v8::internal
+
+#endif  // V8_ONCE_H_
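
A minimal usage sketch for the new once.h API above. All names below (g_table,
InitTable, LookUp, InitWithSize, EnsureSized) are hypothetical; only the
V8_DECLARE_ONCE/CallOnce machinery comes from the header, and the sketch
assumes it is compiled inside v8's internal sources:

    static int g_table[256];        // hypothetical shared state
    V8_DECLARE_ONCE(table_once);    // POD: no static initializer is emitted

    static void InitTable() {
      for (int i = 0; i < 256; ++i) g_table[i] = i * i;
    }

    // Thread-safe lazy accessor: only the first caller runs InitTable(),
    // and every caller blocks until initialization has finished.
    int LookUp(int i) {
      CallOnce(&table_once, &InitTable);
      return g_table[i];
    }

    // One-argument variant: CallOnce forwards the pointer to init_func.
    V8_DECLARE_ONCE(sized_once);
    static void InitWithSize(int* size) { *size = 256; }
    void EnsureSized(int* size) { CallOnce(&sized_once, &InitWithSize, size); }
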
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
index 2dc1ed8..fa6fce0 100644
--- a/src/platform-cygwin.cc
+++ b/src/platform-cygwin.cc
@@ -628,7 +628,7 @@
         interval_(interval) {}
 
   static void AddActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
+    ScopedLock lock(mutex_.Pointer());
     SamplerRegistry::AddActiveSampler(sampler);
     if (instance_ == NULL) {
       instance_ = new SamplerThread(sampler->interval());
@@ -639,7 +639,7 @@
   }
 
   static void RemoveActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
+    ScopedLock lock(mutex_.Pointer());
     SamplerRegistry::RemoveActiveSampler(sampler);
     if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
       RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -725,7 +725,7 @@
   RuntimeProfilerRateLimiter rate_limiter_;
 
   // Protects the process wide state below.
-  static Mutex* mutex_;
+  static LazyMutex mutex_;
   static SamplerThread* instance_;
 
  private:
@@ -733,7 +733,7 @@
 };
 
 
-Mutex* SamplerThread::mutex_ = OS::CreateMutex();
+LazyMutex SamplerThread::mutex_ = LAZY_MUTEX_INITIALIZER;
 SamplerThread* SamplerThread::instance_ = NULL;
 
 
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index f6a426f..2a9e174 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -723,7 +723,7 @@
         interval_(interval) {}
 
   static void AddActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
+    ScopedLock lock(mutex_.Pointer());
     SamplerRegistry::AddActiveSampler(sampler);
     if (instance_ == NULL) {
       // Install a signal handler.
@@ -743,7 +743,7 @@
   }
 
   static void RemoveActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
+    ScopedLock lock(mutex_.Pointer());
     SamplerRegistry::RemoveActiveSampler(sampler);
     if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
       RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -836,7 +836,7 @@
   RuntimeProfilerRateLimiter rate_limiter_;
 
   // Protects the process wide state below.
-  static Mutex* mutex_;
+  static LazyMutex mutex_;
   static SignalSender* instance_;
   static bool signal_handler_installed_;
   static struct sigaction old_signal_handler_;
@@ -845,7 +845,7 @@
   DISALLOW_COPY_AND_ASSIGN(SignalSender);
 };
 
-Mutex* SignalSender::mutex_ = OS::CreateMutex();
+LazyMutex SignalSender::mutex_ = LAZY_MUTEX_INITIALIZER;
 SignalSender* SignalSender::instance_ = NULL;
 struct sigaction SignalSender::old_signal_handler_;
 bool SignalSender::signal_handler_installed_ = false;
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 287d820..08f4495 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -1093,7 +1093,7 @@
   }
 
   static void AddActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
+    ScopedLock lock(mutex_.Pointer());
     SamplerRegistry::AddActiveSampler(sampler);
     if (instance_ == NULL) {
       // Start a thread that will send SIGPROF signal to VM threads,
@@ -1106,7 +1106,7 @@
   }
 
   static void RemoveActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
+    ScopedLock lock(mutex_.Pointer());
     SamplerRegistry::RemoveActiveSampler(sampler);
     if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
       RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -1209,7 +1209,7 @@
   RuntimeProfilerRateLimiter rate_limiter_;
 
   // Protects the process wide state below.
-  static Mutex* mutex_;
+  static LazyMutex mutex_;
   static SignalSender* instance_;
   static bool signal_handler_installed_;
   static struct sigaction old_signal_handler_;
@@ -1219,7 +1219,7 @@
 };
 
 
-Mutex* SignalSender::mutex_ = OS::CreateMutex();
+LazyMutex SignalSender::mutex_ = LAZY_MUTEX_INITIALIZER;
 SignalSender* SignalSender::instance_ = NULL;
 struct sigaction SignalSender::old_signal_handler_;
 bool SignalSender::signal_handler_installed_ = false;
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 89abf39..bfcaab0 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -746,7 +746,7 @@
         interval_(interval) {}
 
   static void AddActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
+    ScopedLock lock(mutex_.Pointer());
     SamplerRegistry::AddActiveSampler(sampler);
     if (instance_ == NULL) {
       instance_ = new SamplerThread(sampler->interval());
@@ -757,7 +757,7 @@
   }
 
   static void RemoveActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
+    ScopedLock lock(mutex_.Pointer());
     SamplerRegistry::RemoveActiveSampler(sampler);
     if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
       RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -854,7 +854,7 @@
   RuntimeProfilerRateLimiter rate_limiter_;
 
   // Protects the process wide state below.
-  static Mutex* mutex_;
+  static LazyMutex mutex_;
   static SamplerThread* instance_;
 
  private:
@@ -864,7 +864,7 @@
 #undef REGISTER_FIELD
 
 
-Mutex* SamplerThread::mutex_ = OS::CreateMutex();
+LazyMutex SamplerThread::mutex_ = LAZY_MUTEX_INITIALIZER;
 SamplerThread* SamplerThread::instance_ = NULL;
 
 
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index 0d69971..b79cb71 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -812,7 +812,7 @@
   }
 
   static void AddActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
+    ScopedLock lock(mutex_.Pointer());
     SamplerRegistry::AddActiveSampler(sampler);
     if (instance_ == NULL) {
       // Start a thread that will send SIGPROF signal to VM threads,
@@ -825,7 +825,7 @@
   }
 
   static void RemoveActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
+    ScopedLock lock(mutex_.Pointer());
     SamplerRegistry::RemoveActiveSampler(sampler);
     if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
       RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -919,7 +919,7 @@
   RuntimeProfilerRateLimiter rate_limiter_;
 
   // Protects the process wide state below.
-  static Mutex* mutex_;
+  static LazyMutex mutex_;
   static SignalSender* instance_;
   static bool signal_handler_installed_;
   static struct sigaction old_signal_handler_;
@@ -929,7 +929,7 @@
 };
 
 
-Mutex* SignalSender::mutex_ = OS::CreateMutex();
+LazyMutex SignalSender::mutex_ = LAZY_MUTEX_INITIALIZER;
 SignalSender* SignalSender::instance_ = NULL;
 struct sigaction SignalSender::old_signal_handler_;
 bool SignalSender::signal_handler_installed_ = false;
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index 4b1edfe..a729b66 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -127,17 +127,15 @@
 }
 
 
-static Mutex* math_function_mutex = OS::CreateMutex();
-
 #define UNARY_MATH_FUNCTION(name, generator)             \
 static UnaryMathFunction fast_##name##_function = NULL;  \
+V8_DECLARE_ONCE(fast_##name##_init_once);                \
+void init_fast_##name##_function() {                     \
+  fast_##name##_function = generator;                    \
+}                                                        \
 double fast_##name(double x) {                           \
-  if (fast_##name##_function == NULL) {                  \
-    ScopedLock lock(math_function_mutex);                \
-    UnaryMathFunction temp = generator;                  \
-    MemoryBarrier();                                     \
-    fast_##name##_function = temp;                       \
-  }                                                      \
+  CallOnce(&fast_##name##_init_once,                     \
+           &init_fast_##name##_function);                \
   return (*fast_##name##_function)(x);                   \
 }
 
@@ -307,14 +305,14 @@
 
 #if defined(V8_TARGET_ARCH_IA32)
 static OS::MemCopyFunction memcopy_function = NULL;
-static Mutex* memcopy_function_mutex = OS::CreateMutex();
+static LazyMutex memcopy_function_mutex = LAZY_MUTEX_INITIALIZER;
 // Defined in codegen-ia32.cc.
 OS::MemCopyFunction CreateMemCopyFunction();
 
 // Copy memory area to disjoint memory area.
 void OS::MemCopy(void* dest, const void* src, size_t size) {
   if (memcopy_function == NULL) {
-    ScopedLock lock(memcopy_function_mutex);
+    ScopedLock lock(memcopy_function_mutex.Pointer());
     if (memcopy_function == NULL) {
       OS::MemCopyFunction temp = CreateMemCopyFunction();
       MemoryBarrier();
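
A hand expansion of the rewritten UNARY_MATH_FUNCTION macro for name=sin may
make the change clearer; "generator" stands in for whatever code-generation
expression the platform file supplies:

    static UnaryMathFunction fast_sin_function = NULL;
    V8_DECLARE_ONCE(fast_sin_init_once);
    void init_fast_sin_function() {
      fast_sin_function = generator;  // platform-supplied generator expression
    }
    double fast_sin(double x) {
      // CallOnce guarantees the init function has completed before any caller
      // proceeds, replacing the old mutex + MemoryBarrier double-checked lock.
      CallOnce(&fast_sin_init_once, &init_fast_sin_function);
      return (*fast_sin_function)(x);
    }
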
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index 004a6ed..50ad353 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -733,7 +733,7 @@
   }
 
   static void AddActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
+    ScopedLock lock(mutex_.Pointer());
     SamplerRegistry::AddActiveSampler(sampler);
     if (instance_ == NULL) {
       // Start a thread that will send SIGPROF signal to VM threads,
@@ -746,7 +746,7 @@
   }
 
   static void RemoveActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
+    ScopedLock lock(mutex_.Pointer());
     SamplerRegistry::RemoveActiveSampler(sampler);
     if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
       RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -840,7 +840,7 @@
   RuntimeProfilerRateLimiter rate_limiter_;
 
   // Protects the process wide state below.
-  static Mutex* mutex_;
+  static LazyMutex mutex_;
   static SignalSender* instance_;
   static bool signal_handler_installed_;
   static struct sigaction old_signal_handler_;
@@ -849,7 +849,7 @@
   DISALLOW_COPY_AND_ASSIGN(SignalSender);
 };
 
-Mutex* SignalSender::mutex_ = OS::CreateMutex();
+LazyMutex SignalSender::mutex_ = LAZY_MUTEX_INITIALIZER;
 SignalSender* SignalSender::instance_ = NULL;
 struct sigaction SignalSender::old_signal_handler_;
 bool SignalSender::signal_handler_installed_ = false;
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index e7be930..12cd610 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -66,16 +66,8 @@
 
 
 #ifndef __MINGW64_VERSION_MAJOR
-
-// Not sure this the correct interpretation of _mkgmtime
-time_t _mkgmtime(tm* timeptr) {
-  return mktime(timeptr);
-}
-
-
 #define _TRUNCATE 0
 #define STRUNCATE 80
-
 #endif  // __MINGW64_VERSION_MAJOR
 
 
@@ -149,14 +141,14 @@
 
 #if defined(V8_TARGET_ARCH_IA32)
 static OS::MemCopyFunction memcopy_function = NULL;
-static Mutex* memcopy_function_mutex = OS::CreateMutex();
+static LazyMutex memcopy_function_mutex = LAZY_MUTEX_INITIALIZER;
 // Defined in codegen-ia32.cc.
 OS::MemCopyFunction CreateMemCopyFunction();
 
 // Copy memory area to disjoint memory area.
 void OS::MemCopy(void* dest, const void* src, size_t size) {
   if (memcopy_function == NULL) {
-    ScopedLock lock(memcopy_function_mutex);
+    ScopedLock lock(memcopy_function_mutex.Pointer());
     if (memcopy_function == NULL) {
       OS::MemCopyFunction temp = CreateMemCopyFunction();
       MemoryBarrier();
@@ -175,19 +167,16 @@
 #ifdef _WIN64
 typedef double (*ModuloFunction)(double, double);
 static ModuloFunction modulo_function = NULL;
-static Mutex* modulo_function_mutex = OS::CreateMutex();
+V8_DECLARE_ONCE(modulo_function_init_once);
 // Defined in codegen-x64.cc.
 ModuloFunction CreateModuloFunction();
 
+void init_modulo_function() {
+  modulo_function = CreateModuloFunction();
+}
+
 double modulo(double x, double y) {
-  if (modulo_function == NULL) {
-    ScopedLock lock(modulo_function_mutex);
-    if (modulo_function == NULL) {
-      ModuloFunction temp = CreateModuloFunction();
-      MemoryBarrier();
-      modulo_function = temp;
-    }
-  }
+  CallOnce(&modulo_function_init_once, &init_modulo_function);
   // Note: here we rely on dependent reads being ordered. This is true
   // on all architectures we currently support.
   return (*modulo_function)(x, y);
@@ -208,17 +197,15 @@
 #endif  // _WIN64
 
 
-static Mutex* math_function_mutex = OS::CreateMutex();
-
 #define UNARY_MATH_FUNCTION(name, generator)             \
 static UnaryMathFunction fast_##name##_function = NULL;  \
+V8_DECLARE_ONCE(fast_##name##_init_once);                \
+void init_fast_##name##_function() {                     \
+  fast_##name##_function = generator;                    \
+}                                                        \
 double fast_##name(double x) {                           \
-  if (fast_##name##_function == NULL) {                  \
-    ScopedLock lock(math_function_mutex);                \
-    UnaryMathFunction temp = generator;                  \
-    MemoryBarrier();                                     \
-    fast_##name##_function = temp;                       \
-  }                                                      \
+  CallOnce(&fast_##name##_init_once,                     \
+           &init_fast_##name##_function);                \
   return (*fast_##name##_function)(x);                   \
 }
 
@@ -475,6 +462,9 @@
   // Check if we need to resync due to elapsed time.
   needs_resync |= (time_now.t_ - init_time.t_) > kMaxClockElapsedTime;
 
+  // Check if we need to resync due to backwards time change.
+  needs_resync |= time_now.t_ < init_time.t_;
+
   // Resync the clock if necessary.
   if (needs_resync) {
     GetSystemTimeAsFileTime(&init_time.ft_);
@@ -516,11 +506,14 @@
   // Convert to local time, as struct with fields for day, hour, year, etc.
   tm posix_local_time_struct;
   if (localtime_s(&posix_local_time_struct, &posix_time)) return 0;
-  // Convert local time in struct to POSIX time as if it were a UTC time.
-  time_t local_posix_time = _mkgmtime(&posix_local_time_struct);
-  Time localtime(1000.0 * local_posix_time);
 
-  return localtime.Diff(&rounded_to_second);
+  if (posix_local_time_struct.tm_isdst > 0) {
+    return (tzinfo_.Bias + tzinfo_.DaylightBias) * -kMsPerMinute;
+  } else if (posix_local_time_struct.tm_isdst == 0) {
+    return (tzinfo_.Bias + tzinfo_.StandardBias) * -kMsPerMinute;
+  } else {
+    return tzinfo_.Bias * -kMsPerMinute;
+  }
 }
 
 
@@ -1961,7 +1954,7 @@
         interval_(interval) {}
 
   static void AddActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
+    ScopedLock lock(mutex_.Pointer());
     SamplerRegistry::AddActiveSampler(sampler);
     if (instance_ == NULL) {
       instance_ = new SamplerThread(sampler->interval());
@@ -1972,7 +1965,7 @@
   }
 
   static void RemoveActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
+    ScopedLock lock(mutex_.Pointer());
     SamplerRegistry::RemoveActiveSampler(sampler);
     if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
       RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -2058,7 +2051,7 @@
   RuntimeProfilerRateLimiter rate_limiter_;
 
   // Protects the process wide state below.
-  static Mutex* mutex_;
+  static LazyMutex mutex_;
   static SamplerThread* instance_;
 
  private:
@@ -2066,7 +2059,7 @@
 };
 
 
-Mutex* SamplerThread::mutex_ = OS::CreateMutex();
+LazyMutex SamplerThread::mutex_ = LAZY_MUTEX_INITIALIZER;
 SamplerThread* SamplerThread::instance_ = NULL;
 
 
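As a sanity check on the new bias arithmetic (Windows defines Bias in minutes
such that UTC = local time + Bias), here is the DST branch of LocalTimeOffset
evaluated with assumed US Pacific values:

    const int Bias = 480;          // Pacific: UTC-8 standard time
    const int DaylightBias = -60;  // clocks one hour forward in DST
    const int kMsPerMinute = 60000;
    // (480 + -60) * -60000 == -25200000 ms, i.e. local time = UTC-7.
    int offset_ms = (Bias + DaylightBias) * -kMsPerMinute;
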
diff --git a/src/platform.h b/src/platform.h
index 00819ed..4ec6057 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -79,6 +79,7 @@
 #endif  // WIN32
 
 #include "atomicops.h"
+#include "lazy-instance.h"
 #include "platform-tls.h"
 #include "utils.h"
 #include "v8globals.h"
@@ -529,6 +530,24 @@
   virtual bool TryLock() = 0;
 };
 
+struct CreateMutexTrait {
+  static Mutex* Create() {
+    return OS::CreateMutex();
+  }
+};
+
+// POD Mutex initialized lazily (i.e. the first time Pointer() is called).
+// Usage:
+//   static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;
+//
+//   void my_function() {
+//     ScopedLock my_lock(my_mutex.Pointer());
+//     // Do something.
+//   }
+//
+typedef LazyDynamicInstance<Mutex, CreateMutexTrait>::type LazyMutex;
+
+#define LAZY_MUTEX_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
 
 // ----------------------------------------------------------------------------
 // ScopedLock
@@ -578,6 +597,30 @@
   virtual void Signal() = 0;
 };
 
+template <int InitialValue>
+struct CreateSemaphoreTrait {
+  static Semaphore* Create() {
+    return OS::CreateSemaphore(InitialValue);
+  }
+};
+
+// POD Semaphore initialized lazily (i.e. the first time Pointer() is called).
+// Usage:
+//   // The following semaphore starts at 0.
+//   static LazySemaphore<0>::type my_semaphore = LAZY_SEMAPHORE_INITIALIZER;
+//
+//   void my_function() {
+//     // Do something with my_semaphore.Pointer().
+//   }
+//
+template <int InitialValue>
+struct LazySemaphore {
+  typedef typename LazyDynamicInstance<
+      Semaphore, CreateSemaphoreTrait<InitialValue> >::type type;
+};
+
+#define LAZY_SEMAPHORE_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
+
 
 // ----------------------------------------------------------------------------
 // Socket
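
The two lazy helpers compose; a sketch with hypothetical Produce/Consume
functions (both objects are PODs, so these definitions emit no static
initializers):

    static LazyMutex config_mutex = LAZY_MUTEX_INITIALIZER;
    static LazySemaphore<0>::type ready_sem = LAZY_SEMAPHORE_INITIALIZER;

    void Produce() {
      ScopedLock lock(config_mutex.Pointer());  // mutex created on first use
      // ... mutate shared state ...
      ready_sem.Pointer()->Signal();
    }

    void Consume() {
      ready_sem.Pointer()->Wait();              // semaphore starts at 0
      ScopedLock lock(config_mutex.Pointer());
      // ... read shared state ...
    }
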
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 2d0984e..2315fc5 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -1133,7 +1133,8 @@
       gc_roots_entry_(NULL),
       natives_root_entry_(NULL),
       raw_entries_(NULL),
-      entries_sorted_(false) {
+      entries_sorted_(false),
+      max_snapshot_js_object_id_(0) {
   STATIC_CHECK(
       sizeof(HeapGraphEdge) ==
       SnapshotSizeConstants<kPointerSize>::kExpectedHeapGraphEdgeSize);
@@ -1157,6 +1158,11 @@
 }
 
 
+void HeapSnapshot::RememberLastJSObjectId() {
+  max_snapshot_js_object_id_ = collection_->last_assigned_id();
+}
+
+
 void HeapSnapshot::AllocateEntries(int entries_count,
                                    int children_count,
                                    int retainers_count) {
@@ -3105,6 +3111,8 @@
   // Pass 2. Fill references.
   if (!FillReferences()) return false;
 
+  snapshot_->RememberLastJSObjectId();
+
   if (!SetEntriesDominators()) return false;
   if (!CalculateRetainedSizes()) return false;
 
diff --git a/src/profile-generator.h b/src/profile-generator.h
index d9a1319..b9de69b 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -35,8 +35,6 @@
 namespace v8 {
 namespace internal {
 
-typedef uint32_t SnapshotObjectId;
-
 class TokenEnumerator {
  public:
   TokenEnumerator();
@@ -647,6 +645,10 @@
   HeapEntry* gc_subroot(int index) { return gc_subroot_entries_[index]; }
   List<HeapEntry*>* entries() { return &entries_; }
   size_t raw_entries_size() { return raw_entries_size_; }
+  void RememberLastJSObjectId();
+  SnapshotObjectId max_snapshot_js_object_id() const {
+    return max_snapshot_js_object_id_;
+  }
 
   void AllocateEntries(
       int entries_count, int children_count, int retainers_count);
@@ -687,6 +689,7 @@
   List<HeapEntry*> entries_;
   bool entries_sorted_;
   size_t raw_entries_size_;
+  SnapshotObjectId max_snapshot_js_object_id_;
 
   friend class HeapSnapshotTester;
 
@@ -702,6 +705,9 @@
   void SnapshotGenerationFinished();
   SnapshotObjectId FindObject(Address addr);
   void MoveObject(Address from, Address to);
+  SnapshotObjectId last_assigned_id() const {
+    return next_id_ - kObjectIdStep;
+  }
 
   static SnapshotObjectId GenerateId(v8::RetainedObjectInfo* info);
   static inline SnapshotObjectId GetNthGcSubrootId(int delta);
@@ -766,6 +772,9 @@
   SnapshotObjectId GetObjectId(Address addr) { return ids_.FindObject(addr); }
   Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id);
   void ObjectMoveEvent(Address from, Address to) { ids_.MoveObject(from, to); }
+  SnapshotObjectId last_assigned_id() const {
+    return ids_.last_assigned_id();
+  }
 
  private:
   INLINE(static bool HeapSnapshotsMatch(void* key1, void* key2)) {
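
The new last_assigned_id() works because the id allocator bumps next_id_ by
kObjectIdStep per object, so the most recently issued id is always
next_id_ - kObjectIdStep. With hypothetical values: if kObjectIdStep is 2 and
ids 2, 4, 6 have been issued, next_id_ is 8 and last_assigned_id() returns 6.
RememberLastJSObjectId() snapshots this value after references are filled so
that GetMaxSnapshotJSObjectId() can later distinguish objects that existed
when the snapshot was taken from ones allocated afterwards.
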
diff --git a/src/regexp-macro-assembler-irregexp-inl.h b/src/regexp-macro-assembler-irregexp-inl.h
index f2a4e85..a767ec0 100644
--- a/src/regexp-macro-assembler-irregexp-inl.h
+++ b/src/regexp-macro-assembler-irregexp-inl.h
@@ -62,6 +62,16 @@
 }
 
 
+void RegExpMacroAssemblerIrregexp::Emit8(uint32_t word) {
+  ASSERT(pc_ <= buffer_.length());
+  if (pc_ == buffer_.length()) {
+    Expand();
+  }
+  *reinterpret_cast<unsigned char*>(buffer_.start() + pc_) = word;
+  pc_ += 1;
+}
+
+
 void RegExpMacroAssemblerIrregexp::Emit32(uint32_t word) {
   ASSERT(pc_ <= buffer_.length());
   if (pc_ + 3 >= buffer_.length()) {
diff --git a/src/regexp-macro-assembler-irregexp.cc b/src/regexp-macro-assembler-irregexp.cc
index 322efa1..aa67919 100644
--- a/src/regexp-macro-assembler-irregexp.cc
+++ b/src/regexp-macro-assembler-irregexp.cc
@@ -352,6 +352,42 @@
 }
 
 
+void RegExpMacroAssemblerIrregexp::CheckCharacterInRange(
+    uc16 from,
+    uc16 to,
+    Label* on_in_range) {
+  Emit(BC_CHECK_CHAR_IN_RANGE, 0);
+  Emit16(from);
+  Emit16(to);
+  EmitOrLink(on_in_range);
+}
+
+
+void RegExpMacroAssemblerIrregexp::CheckCharacterNotInRange(
+    uc16 from,
+    uc16 to,
+    Label* on_not_in_range) {
+  Emit(BC_CHECK_CHAR_NOT_IN_RANGE, 0);
+  Emit16(from);
+  Emit16(to);
+  EmitOrLink(on_not_in_range);
+}
+
+
+void RegExpMacroAssemblerIrregexp::CheckBitInTable(
+    Handle<ByteArray> table, Label* on_bit_set) {
+  Emit(BC_CHECK_BIT_IN_TABLE, 0);
+  EmitOrLink(on_bit_set);
+  for (int i = 0; i < kTableSize; i += kBitsPerByte) {
+    int byte = 0;
+    for (int j = 0; j < kBitsPerByte; j++) {
+      if (table->get(i + j) != 0) byte |= 1 << j;
+    }
+    Emit8(byte);
+  }
+}
+
+
 void RegExpMacroAssemblerIrregexp::CheckNotBackReference(int start_reg,
                                                          Label* on_not_equal) {
   ASSERT(start_reg >= 0);
diff --git a/src/regexp-macro-assembler-irregexp.h b/src/regexp-macro-assembler-irregexp.h
index 262ead2..25cb68d 100644
--- a/src/regexp-macro-assembler-irregexp.h
+++ b/src/regexp-macro-assembler-irregexp.h
@@ -93,6 +93,13 @@
                                               uc16 minus,
                                               uc16 mask,
                                               Label* on_not_equal);
+  virtual void CheckCharacterInRange(uc16 from,
+                                     uc16 to,
+                                     Label* on_in_range);
+  virtual void CheckCharacterNotInRange(uc16 from,
+                                        uc16 to,
+                                        Label* on_not_in_range);
+  virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
   virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
   virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
                                                Label* on_no_match);
@@ -114,6 +121,7 @@
   inline void EmitOrLink(Label* label);
   inline void Emit32(uint32_t x);
   inline void Emit16(uint32_t x);
+  inline void Emit8(uint32_t x);
   inline void Emit(uint32_t bc, uint32_t arg);
   // Bytecode buffer.
   int length();
diff --git a/src/regexp-macro-assembler-tracer.cc b/src/regexp-macro-assembler-tracer.cc
index f843278..b7aeac4 100644
--- a/src/regexp-macro-assembler-tracer.cc
+++ b/src/regexp-macro-assembler-tracer.cc
@@ -198,24 +198,55 @@
 }
 
 
+class PrintablePrinter {
+ public:
+  explicit PrintablePrinter(uc16 character) : character_(character) { }
+
+  const char* operator*() {
+    if (character_ >= ' ' && character_ <= '~') {
+      buffer_[0] = '(';
+      buffer_[1] = static_cast<char>(character_);
+      buffer_[2] = ')';
+      buffer_[3] = '\0';
+    } else {
+      buffer_[0] = '\0';
+    }
+    return &buffer_[0];
+  }
+
+ private:
+  uc16 character_;
+  char buffer_[4];
+};
+
+
 void RegExpMacroAssemblerTracer::CheckCharacterLT(uc16 limit, Label* on_less) {
-  PrintF(" CheckCharacterLT(c='u%04x', label[%08x]);\n",
-         limit, LabelToInt(on_less));
+  PrintablePrinter printable(limit);
+  PrintF(" CheckCharacterLT(c=0x%04x%s, label[%08x]);\n",
+         limit,
+         *printable,
+         LabelToInt(on_less));
   assembler_->CheckCharacterLT(limit, on_less);
 }
 
 
 void RegExpMacroAssemblerTracer::CheckCharacterGT(uc16 limit,
                                                   Label* on_greater) {
-  PrintF(" CheckCharacterGT(c='u%04x', label[%08x]);\n",
-         limit, LabelToInt(on_greater));
+  PrintablePrinter printable(limit);
+  PrintF(" CheckCharacterGT(c=0x%04x%s, label[%08x]);\n",
+         limit,
+         *printable,
+         LabelToInt(on_greater));
   assembler_->CheckCharacterGT(limit, on_greater);
 }
 
 
 void RegExpMacroAssemblerTracer::CheckCharacter(unsigned c, Label* on_equal) {
-  PrintF(" CheckCharacter(c='u%04x', label[%08x]);\n",
-         c, LabelToInt(on_equal));
+  PrintablePrinter printable(c);
+  PrintF(" CheckCharacter(c=0x%04x%s, label[%08x]);\n",
+         c,
+         *printable,
+         LabelToInt(on_equal));
   assembler_->CheckCharacter(c, on_equal);
 }
 
@@ -234,8 +265,11 @@
 
 void RegExpMacroAssemblerTracer::CheckNotCharacter(unsigned c,
                                                    Label* on_not_equal) {
-  PrintF(" CheckNotCharacter(c='u%04x', label[%08x]);\n",
-         c, LabelToInt(on_not_equal));
+  PrintablePrinter printable(c);
+  PrintF(" CheckNotCharacter(c=0x%04x%s, label[%08x]);\n",
+         c,
+         *printable,
+         LabelToInt(on_not_equal));
   assembler_->CheckNotCharacter(c, on_not_equal);
 }
 
@@ -244,8 +278,10 @@
     unsigned c,
     unsigned mask,
     Label* on_equal) {
-  PrintF(" CheckCharacterAfterAnd(c='u%04x', mask=0x%04x, label[%08x]);\n",
+  PrintablePrinter printable(c);
+  PrintF(" CheckCharacterAfterAnd(c=0x%04x%s, mask=0x%04x, label[%08x]);\n",
          c,
+         *printable,
          mask,
          LabelToInt(on_equal));
   assembler_->CheckCharacterAfterAnd(c, mask, on_equal);
@@ -256,8 +292,10 @@
     unsigned c,
     unsigned mask,
     Label* on_not_equal) {
-  PrintF(" CheckNotCharacterAfterAnd(c='u%04x', mask=0x%04x, label[%08x]);\n",
+  PrintablePrinter printable(c);
+  PrintF(" CheckNotCharacterAfterAnd(c=0x%04x%s, mask=0x%04x, label[%08x]);\n",
          c,
+         *printable,
          mask,
          LabelToInt(on_not_equal));
   assembler_->CheckNotCharacterAfterAnd(c, mask, on_not_equal);
@@ -269,7 +307,7 @@
     uc16 minus,
     uc16 mask,
     Label* on_not_equal) {
-  PrintF(" CheckNotCharacterAfterMinusAnd(c='u%04x', minus=%04x, mask=0x%04x, "
+  PrintF(" CheckNotCharacterAfterMinusAnd(c=0x%04x, minus=0x%04x, mask=0x%04x, "
              "label[%08x]);\n",
          c,
          minus,
@@ -279,6 +317,53 @@
 }
 
 
+void RegExpMacroAssemblerTracer::CheckCharacterInRange(
+    uc16 from,
+    uc16 to,
+    Label* on_in_range) {
+  PrintablePrinter printable_from(from);
+  PrintablePrinter printable_to(to);
+  PrintF(" CheckCharacterInRange(from=0x%04x%s, to=0x%04x%s, label[%08x]);\n",
+         from,
+         *printable_from,
+         to,
+         *printable_to,
+         LabelToInt(on_in_range));
+  assembler_->CheckCharacterInRange(from, to, on_in_range);
+}
+
+
+void RegExpMacroAssemblerTracer::CheckCharacterNotInRange(
+    uc16 from,
+    uc16 to,
+    Label* on_not_in_range) {
+  PrintablePrinter printable_from(from);
+  PrintablePrinter printable_to(to);
+  PrintF(
+      " CheckCharacterNotInRange(from=0x%04x%s, to=0x%04x%s, label[%08x]);\n",
+      from,
+      *printable_from,
+      to,
+      *printable_to,
+      LabelToInt(on_not_in_range));
+  assembler_->CheckCharacterNotInRange(from, to, on_not_in_range);
+}
+
+
+void RegExpMacroAssemblerTracer::CheckBitInTable(
+    Handle<ByteArray> table, Label* on_bit_set) {
+  PrintF(" CheckBitInTable(label[%08x] ", LabelToInt(on_bit_set));
+  for (int i = 0; i < kTableSize; i++) {
+    PrintF("%c", table->get(i) != 0 ? 'X' : '.');
+    if (i % 32 == 31 && i != kTableMask) {
+      PrintF("\n                                 ");
+    }
+  }
+  PrintF(");\n");
+  assembler_->CheckBitInTable(table, on_bit_set);
+}
+
+
 void RegExpMacroAssemblerTracer::CheckNotBackReference(int start_reg,
                                                        Label* on_no_match) {
   PrintF(" CheckNotBackReference(register=%d, label[%08x]);\n", start_reg,
@@ -314,7 +399,7 @@
   PrintF(" %s(str=\"",
          check_end_of_string ? "CheckCharacters" : "CheckCharactersUnchecked");
   for (int i = 0; i < str.length(); i++) {
-    PrintF("u%04x", str[i]);
+    PrintF("0x%04x", str[i]);
   }
   PrintF("\", cp_offset=%d, label[%08x])\n",
          cp_offset, LabelToInt(on_failure));
diff --git a/src/regexp-macro-assembler-tracer.h b/src/regexp-macro-assembler-tracer.h
index 1cf0349..3fd4d8b 100644
--- a/src/regexp-macro-assembler-tracer.h
+++ b/src/regexp-macro-assembler-tracer.h
@@ -68,6 +68,13 @@
                                               uc16 minus,
                                               uc16 and_with,
                                               Label* on_not_equal);
+  virtual void CheckCharacterInRange(uc16 from,
+                                     uc16 to,
+                                     Label* on_in_range);
+  virtual void CheckCharacterNotInRange(uc16 from,
+                                        uc16 to,
+                                        Label* on_not_in_range);
+  virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
   virtual bool CheckSpecialCharacterClass(uc16 type,
                                           Label* on_no_match);
   virtual void Fail();
diff --git a/src/regexp-macro-assembler.h b/src/regexp-macro-assembler.h
index 0314c70..8587435 100644
--- a/src/regexp-macro-assembler.h
+++ b/src/regexp-macro-assembler.h
@@ -45,6 +45,11 @@
   static const int kMaxRegister = (1 << 16) - 1;
   static const int kMaxCPOffset = (1 << 15) - 1;
   static const int kMinCPOffset = -(1 << 15);
+
+  static const int kTableSizeBits = 7;
+  static const int kTableSize = 1 << kTableSizeBits;
+  static const int kTableMask = kTableSize - 1;
+
   enum IrregexpImplementation {
     kIA32Implementation,
     kARMImplementation,
@@ -106,12 +111,23 @@
   virtual void CheckNotCharacterAfterAnd(unsigned c,
                                          unsigned and_with,
                                          Label* on_not_equal) = 0;
-  // Subtract a constant from the current character, then or with the given
+  // Subtract a constant from the current character, then AND with the given
   // constant and then check for a match with c.
   virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
                                               uc16 minus,
                                               uc16 and_with,
                                               Label* on_not_equal) = 0;
+  virtual void CheckCharacterInRange(uc16 from,
+                                     uc16 to,  // Both inclusive.
+                                     Label* on_in_range) = 0;
+  virtual void CheckCharacterNotInRange(uc16 from,
+                                        uc16 to,  // Both inclusive.
+                                        Label* on_not_in_range) = 0;
+
+  // The current character (modulo kTableSize) is looked up in the byte
+  // array, and if the byte found is non-zero, we jump to the on_bit_set label.
+  virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set) = 0;
+
   virtual void CheckNotRegistersEqual(int reg1,
                                       int reg2,
                                       Label* on_not_equal) = 0;
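
A standalone model of the CheckBitInTable packing (this mirrors the Emit8 loop
added to regexp-macro-assembler-irregexp.cc above, but is not the V8 code
itself):

    #include <stdint.h>

    // Pack 8 boolean table entries into one bitmask byte, LSB first.
    uint8_t PackByte(const bool* table, int i /* multiple of 8 */) {
      uint8_t byte = 0;
      for (int j = 0; j < 8; j++) {
        if (table[i + j]) byte |= 1 << j;
      }
      return byte;
    }

    // What the generated code checks: bit (c mod kTableSize) of the packed
    // 16-byte table, with kTableSize == 128 and kTableMask == 127.
    bool BitIsSet(const uint8_t* packed, uint16_t c) {
      int idx = c & 127;
      return ((packed[idx >> 3] >> (idx & 7)) & 1) != 0;
    }
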
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index fc7e359..7d51190 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -65,6 +65,12 @@
 // Number of times a function has to be seen on the stack before it is
 // optimized.
 static const int kProfilerTicksBeforeOptimization = 2;
+// If a function does not have enough type info (according to
+// FLAG_type_info_threshold), but has seen a huge number of ticks,
+// optimize it as it is.
+static const int kTicksWhenNotEnoughTypeInfo = 100;
+// We only have one byte to store the number of ticks.
+STATIC_ASSERT(kTicksWhenNotEnoughTypeInfo < 256);
 
 // Maximum size in bytes of generated code for a function to be optimized
 // the very first time it is seen on the stack.
@@ -72,9 +78,9 @@
 
 
 Atomic32 RuntimeProfiler::state_ = 0;
-// TODO(isolates): Create the semaphore lazily and clean it up when no
-// longer required.
-Semaphore* RuntimeProfiler::semaphore_ = OS::CreateSemaphore(0);
+
+// TODO(isolates): Clean up the semaphore when it is no longer required.
+static LazySemaphore<0>::type semaphore = LAZY_SEMAPHORE_INITIALIZER;
 
 #ifdef DEBUG
 bool RuntimeProfiler::has_been_globally_set_up_ = false;
@@ -88,7 +94,9 @@
       sampler_threshold_size_factor_(kSamplerThresholdSizeFactorInit),
       sampler_ticks_until_threshold_adjustment_(
           kSamplerTicksBetweenThresholdAdjustment),
-      sampler_window_position_(0) {
+      sampler_window_position_(0),
+      any_ic_changed_(false),
+      code_generated_(false) {
   ClearSampleBuffer();
 }
 
@@ -103,20 +111,20 @@
 
 
 static void GetICCounts(JSFunction* function,
-                        int* ic_with_typeinfo_count,
+                        int* ic_with_type_info_count,
                         int* ic_total_count,
                         int* percentage) {
   *ic_total_count = 0;
-  *ic_with_typeinfo_count = 0;
+  *ic_with_type_info_count = 0;
   Object* raw_info =
       function->shared()->code()->type_feedback_info();
   if (raw_info->IsTypeFeedbackInfo()) {
     TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info);
-    *ic_with_typeinfo_count = info->ic_with_typeinfo_count();
+    *ic_with_type_info_count = info->ic_with_type_info_count();
     *ic_total_count = info->ic_total_count();
   }
   *percentage = *ic_total_count > 0
-      ? 100 * *ic_with_typeinfo_count / *ic_total_count
+      ? 100 * *ic_with_type_info_count / *ic_total_count
       : 100;
 }
 
@@ -173,12 +181,10 @@
   // prepared to generate it, but we don't expect to have to.
   bool found_code = false;
   Code* stack_check_code = NULL;
-#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_ARM)
   if (FLAG_count_based_interrupts) {
     InterruptStub interrupt_stub;
     found_code = interrupt_stub.FindCodeInCache(&stack_check_code);
   } else  // NOLINT
-#endif
   {  // NOLINT
     StackCheckStub check_stub;
     found_code = check_stub.FindCodeInCache(&stack_check_code);
@@ -257,13 +263,14 @@
       }
     }
 
-    if (function->IsMarkedForLazyRecompilation() &&
-        function->shared()->code()->kind() == Code::FUNCTION) {
-      Code* unoptimized = function->shared()->code();
-      int nesting = unoptimized->allow_osr_at_loop_nesting_level();
+    Code* shared_code = function->shared()->code();
+    if (shared_code->kind() != Code::FUNCTION) continue;
+
+    if (function->IsMarkedForLazyRecompilation()) {
+      int nesting = shared_code->allow_osr_at_loop_nesting_level();
       if (nesting == 0) AttemptOnStackReplacement(function);
       int new_nesting = Min(nesting + 1, Code::kMaxLoopNestingMarker);
-      unoptimized->set_allow_osr_at_loop_nesting_level(new_nesting);
+      shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
     }
 
     // Do not record non-optimizable functions.
@@ -281,7 +288,7 @@
     }
 
     if (FLAG_watch_ic_patching) {
-      int ticks = function->shared()->profiler_ticks();
+      int ticks = shared_code->profiler_ticks();
 
       if (ticks >= kProfilerTicksBeforeOptimization) {
         int typeinfo, total, percentage;
@@ -290,12 +297,10 @@
           // If this particular function hasn't had any ICs patched for enough
           // ticks, optimize it now.
           Optimize(function, "hot and stable");
-        } else if (ticks >= 100) {
-          // If this function does not have enough type info, but has
-          // seen a huge number of ticks, optimize it as it is.
+        } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
           Optimize(function, "not much type info but very hot");
         } else {
-          function->shared()->set_profiler_ticks(ticks + 1);
+          shared_code->set_profiler_ticks(ticks + 1);
           if (FLAG_trace_opt_verbose) {
             PrintF("[not yet optimizing ");
             function->PrintName();
@@ -304,20 +309,12 @@
           }
         }
       } else if (!any_ic_changed_ &&
-          function->shared()->code()->instruction_size() < kMaxSizeEarlyOpt) {
+          shared_code->instruction_size() < kMaxSizeEarlyOpt) {
         // If no IC was patched since the last tick and this function is very
         // small, optimistically optimize it now.
         Optimize(function, "small function");
-      } else if (!code_generated_ &&
-          !any_ic_changed_ &&
-          total_code_generated_ > 0 &&
-          total_code_generated_ < 2000) {
-        // If no code was generated and no IC was patched since the last tick,
-        // but a little code has already been generated since last Reset(),
-        // then type info might already be stable and we can optimize now.
-        Optimize(function, "stable on startup");
       } else {
-        function->shared()->set_profiler_ticks(ticks + 1);
+        shared_code->set_profiler_ticks(ticks + 1);
       }
     } else {  // !FLAG_watch_ic_patching
       samples[sample_count++] = function;
@@ -336,7 +333,6 @@
   }
   if (FLAG_watch_ic_patching) {
     any_ic_changed_ = false;
-    code_generated_ = false;
   } else {  // !FLAG_watch_ic_patching
     // Add the collected functions as samples. It's important not to do
     // this as part of collecting them because this will interfere with
@@ -349,9 +345,7 @@
 
 
 void RuntimeProfiler::NotifyTick() {
-#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_ARM)
   if (FLAG_count_based_interrupts) return;
-#endif
   isolate_->stack_guard()->RequestRuntimeProfilerTick();
 }
 
@@ -368,9 +362,7 @@
 
 
 void RuntimeProfiler::Reset() {
-  if (FLAG_watch_ic_patching) {
-    total_code_generated_ = 0;
-  } else {  // !FLAG_watch_ic_patching
+  if (!FLAG_watch_ic_patching) {
     sampler_threshold_ = kSamplerThresholdInit;
     sampler_threshold_size_factor_ = kSamplerThresholdSizeFactorInit;
     sampler_ticks_until_threshold_adjustment_ =
@@ -412,7 +404,7 @@
   // undid the decrement done by the profiler thread. Increment again
   // to get the right count of active isolates.
   NoBarrier_AtomicIncrement(&state_, 1);
-  semaphore_->Signal();
+  semaphore.Pointer()->Signal();
 }
 
 
@@ -425,7 +417,7 @@
   Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1);
   ASSERT(old_state >= -1);
   if (old_state != 0) return false;
-  semaphore_->Wait();
+  semaphore.Pointer()->Wait();
   return true;
 }
 
@@ -441,7 +433,7 @@
   if (new_state == 0) {
     // The profiler thread is waiting. Wake it up. It must check for
     // stop conditions before attempting to wait again.
-    semaphore_->Signal();
+    semaphore.Pointer()->Signal();
   }
   thread->Join();
   // The profiler thread is now stopped. Undo the increment in case it
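
The optimization decision implemented above can be summarized as a sketch
(simplified; flag handling and the non-counter sampler path are omitted):

    enum Decision { OPTIMIZE_HOT_AND_STABLE, OPTIMIZE_VERY_HOT,
                    OPTIMIZE_SMALL_FUNCTION, WAIT };

    Decision Decide(int ticks, int type_info_percentage, int threshold,
                    bool any_ic_changed, int instruction_size,
                    int max_size_early_opt) {
      if (ticks >= 2) {  // kProfilerTicksBeforeOptimization
        if (type_info_percentage >= threshold)
          return OPTIMIZE_HOT_AND_STABLE;   // "hot and stable"
        if (ticks >= 100)                   // kTicksWhenNotEnoughTypeInfo
          return OPTIMIZE_VERY_HOT;         // "not much type info but very hot"
        return WAIT;  // bump profiler_ticks on the code object, retry later
      }
      if (!any_ic_changed && instruction_size < max_size_early_opt)
        return OPTIMIZE_SMALL_FUNCTION;     // "small function"
      return WAIT;
    }
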
diff --git a/src/runtime-profiler.h b/src/runtime-profiler.h
index f7ca3f0..35213a8 100644
--- a/src/runtime-profiler.h
+++ b/src/runtime-profiler.h
@@ -63,13 +63,6 @@
 
   void NotifyICChanged() { any_ic_changed_ = true; }
 
-  void NotifyCodeGenerated(int generated_code_size) {
-    if (FLAG_watch_ic_patching) {
-      code_generated_ = true;
-      total_code_generated_ += generated_code_size;
-    }
-  }
-
   // Rate limiting support.
 
   // VM thread interface.
@@ -130,13 +123,11 @@
 
   bool any_ic_changed_;
   bool code_generated_;
-  int total_code_generated_;
 
   // Possible state values:
   //   -1            => the profiler thread is waiting on the semaphore
   //   0 or positive => the number of isolates running JavaScript code.
   static Atomic32 state_;
-  static Semaphore* semaphore_;
 
 #ifdef DEBUG
   static bool has_been_globally_set_up_;
diff --git a/src/runtime.cc b/src/runtime.cc
index 20a9090..0726b3d 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -7582,7 +7582,7 @@
     }
   }
   date->SetValue(value, is_value_nan);
-  return *date;
+  return value;
 }
 
 
@@ -8043,8 +8043,6 @@
   ASSERT(args.length() == 1);
   Handle<JSFunction> function = args.at<JSFunction>(0);
 
-  function->shared()->set_profiler_ticks(0);
-
   // If the function is not compiled ignore the lazy
   // recompilation. This can happen if the debugger is activated and
   // the function is returned to the not compiled state.
@@ -8067,6 +8065,7 @@
     function->ReplaceCode(function->shared()->code());
     return function->code();
   }
+  function->shared()->code()->set_profiler_ticks(0);
   if (JSFunction::CompileOptimized(function,
                                    AstNode::kNoNumber,
                                    CLEAR_EXCEPTION)) {
@@ -8358,12 +8357,10 @@
     PrintF("]\n");
   }
   Handle<Code> check_code;
-#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_ARM)
   if (FLAG_count_based_interrupts) {
     InterruptStub interrupt_stub;
     check_code = interrupt_stub.GetCode();
   } else  // NOLINT
-#endif
   {  // NOLINT
     StackCheckStub check_stub;
     check_code = check_stub.GetCode();
diff --git a/src/small-pointer-list.h b/src/small-pointer-list.h
index 6c5ce89..75fea06 100644
--- a/src/small-pointer-list.h
+++ b/src/small-pointer-list.h
@@ -69,6 +69,12 @@
     data_ = kEmptyTag;
   }
 
+  void Sort() {
+    if ((data_ & kTagMask) == kListTag) {
+      list()->Sort(compare_value);
+    }
+  }
+
   bool is_empty() const { return length() == 0; }
 
   int length() const {
@@ -159,6 +165,10 @@
  private:
   typedef ZoneList<T*> PointerList;
 
+  static int compare_value(T* const* a, T* const* b) {
+    return Compare<T>(**a, **b);
+  }
+
   static const intptr_t kEmptyTag = 1;
   static const intptr_t kSingletonTag = 0;
   static const intptr_t kListTag = 2;
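
Sort() only has real work to do in the kListTag encoding; the empty and
singleton encodings are trivially sorted, hence the guard. compare_value
adapts the value-level Compare<T> to the T* const* signature that
ZoneList<T*>::Sort expects. A usage sketch (element type and setup
hypothetical):

    SmallPointerList<HeapNumber> deps;
    // ... Add() elements ...
    deps.Sort();  // orders by Compare<HeapNumber> on the pointed-to values
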
diff --git a/src/v8.cc b/src/v8.cc
index 98b3038..c6c4ecb 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -36,6 +36,8 @@
 #include "hydrogen.h"
 #include "lithium-allocator.h"
 #include "log.h"
+#include "once.h"
+#include "platform.h"
 #include "runtime-profiler.h"
 #include "serialize.h"
 #include "store-buffer.h"
@@ -43,8 +45,7 @@
 namespace v8 {
 namespace internal {
 
-static Mutex* init_once_mutex = OS::CreateMutex();
-static bool init_once_called = false;
+V8_DECLARE_ONCE(init_once);
 
 bool V8::is_running_ = false;
 bool V8::has_been_set_up_ = false;
@@ -53,7 +54,8 @@
 bool V8::use_crankshaft_ = true;
 List<CallCompletedCallback>* V8::call_completed_callbacks_ = NULL;
 
-static Mutex* entropy_mutex = OS::CreateMutex();
+static LazyMutex entropy_mutex = LAZY_MUTEX_INITIALIZER;
+
 static EntropySource entropy_source;
 
 
@@ -101,7 +103,13 @@
   ASSERT(isolate->IsDefaultIsolate());
 
   if (!has_been_set_up_ || has_been_disposed_) return;
+
+  ElementsAccessor::TearDown();
+  LOperand::TearDownCaches();
+  RegisteredExtension::UnregisterAll();
+
   isolate->TearDown();
+  delete isolate;
 
   is_running_ = false;
   has_been_disposed_ = true;
@@ -117,7 +125,7 @@
       state[i] = FLAG_random_seed;
     } else if (entropy_source != NULL) {
       uint32_t val;
-      ScopedLock lock(entropy_mutex);
+      ScopedLock lock(entropy_mutex.Pointer());
       entropy_source(reinterpret_cast<unsigned char*>(&val), sizeof(uint32_t));
       state[i] = val;
     } else {
@@ -237,12 +245,7 @@
   return heap_number;
 }
 
-
-void V8::InitializeOncePerProcess() {
-  ScopedLock lock(init_once_mutex);
-  if (init_once_called) return;
-  init_once_called = true;
-
+void V8::InitializeOncePerProcessImpl() {
   // Set up the platform OS support.
   OS::SetUp();
 
@@ -266,6 +269,12 @@
     FLAG_gc_global = true;
     FLAG_max_new_space_size = (1 << (kPageSizeBits - 10)) * 2;
   }
+
+  LOperand::SetUpCaches();
+}
+
+void V8::InitializeOncePerProcess() {
+  CallOnce(&init_once, &InitializeOncePerProcessImpl);
 }
 
 } }  // namespace v8::internal
diff --git a/src/v8.h b/src/v8.h
index 699c5a0..59ce602 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -116,6 +116,7 @@
   static void FireCallCompletedCallback(Isolate* isolate);
 
  private:
+  static void InitializeOncePerProcessImpl();
   static void InitializeOncePerProcess();
 
   // True if engine is currently running
diff --git a/src/version.cc b/src/version.cc
index f990d93..f2aec00 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -33,9 +33,9 @@
 // NOTE these macros are used by the SCons build script so their names
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
-#define MINOR_VERSION     9
-#define BUILD_NUMBER      24
-#define PATCH_LEVEL       3
+#define MINOR_VERSION     10
+#define BUILD_NUMBER      0
+#define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 6bbbbcf..60b29e6 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -161,23 +161,41 @@
   static const int kAllocationIndexByRegisterCode[kNumRegisters];
 };
 
-const Register rax = { 0 };
-const Register rcx = { 1 };
-const Register rdx = { 2 };
-const Register rbx = { 3 };
-const Register rsp = { 4 };
-const Register rbp = { 5 };
-const Register rsi = { 6 };
-const Register rdi = { 7 };
-const Register r8 = { 8 };
-const Register r9 = { 9 };
-const Register r10 = { 10 };
-const Register r11 = { 11 };
-const Register r12 = { 12 };
-const Register r13 = { 13 };
-const Register r14 = { 14 };
-const Register r15 = { 15 };
-const Register no_reg = { -1 };
+const int kRegister_rax_Code = 0;
+const int kRegister_rcx_Code = 1;
+const int kRegister_rdx_Code = 2;
+const int kRegister_rbx_Code = 3;
+const int kRegister_rsp_Code = 4;
+const int kRegister_rbp_Code = 5;
+const int kRegister_rsi_Code = 6;
+const int kRegister_rdi_Code = 7;
+const int kRegister_r8_Code = 8;
+const int kRegister_r9_Code = 9;
+const int kRegister_r10_Code = 10;
+const int kRegister_r11_Code = 11;
+const int kRegister_r12_Code = 12;
+const int kRegister_r13_Code = 13;
+const int kRegister_r14_Code = 14;
+const int kRegister_r15_Code = 15;
+const int kRegister_no_reg_Code = -1;
+
+const Register rax = { kRegister_rax_Code };
+const Register rcx = { kRegister_rcx_Code };
+const Register rdx = { kRegister_rdx_Code };
+const Register rbx = { kRegister_rbx_Code };
+const Register rsp = { kRegister_rsp_Code };
+const Register rbp = { kRegister_rbp_Code };
+const Register rsi = { kRegister_rsi_Code };
+const Register rdi = { kRegister_rdi_Code };
+const Register r8 = { kRegister_r8_Code };
+const Register r9 = { kRegister_r9_Code };
+const Register r10 = { kRegister_r10_Code };
+const Register r11 = { kRegister_r11_Code };
+const Register r12 = { kRegister_r12_Code };
+const Register r13 = { kRegister_r13_Code };
+const Register r14 = { kRegister_r14_Code };
+const Register r15 = { kRegister_r15_Code };
+const Register no_reg = { kRegister_no_reg_Code };
 
 
 struct XMMRegister {
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index d616749..2845039 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -5991,42 +5991,45 @@
 };
 
 
+#define REG(Name) { kRegister_ ## Name ## _Code }
+
 struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
   // Used in RegExpExecStub.
-  { rbx, rax, rdi, EMIT_REMEMBERED_SET },
+  { REG(rbx), REG(rax), REG(rdi), EMIT_REMEMBERED_SET },
   // Used in CompileArrayPushCall.
-  { rbx, rcx, rdx, EMIT_REMEMBERED_SET },
+  { REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
   // Used in CompileStoreGlobal.
-  { rbx, rcx, rdx, OMIT_REMEMBERED_SET },
+  { REG(rbx), REG(rcx), REG(rdx), OMIT_REMEMBERED_SET },
   // Used in StoreStubCompiler::CompileStoreField and
   // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
-  { rdx, rcx, rbx, EMIT_REMEMBERED_SET },
+  { REG(rdx), REG(rcx), REG(rbx), EMIT_REMEMBERED_SET },
   // GenerateStoreField calls the stub with two different permutations of
   // registers.  This is the second.
-  { rbx, rcx, rdx, EMIT_REMEMBERED_SET },
+  { REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
   // StoreIC::GenerateNormal via GenerateDictionaryStore.
-  { rbx, r8, r9, EMIT_REMEMBERED_SET },
+  { REG(rbx), REG(r8), REG(r9), EMIT_REMEMBERED_SET },
   // KeyedStoreIC::GenerateGeneric.
-  { rbx, rdx, rcx, EMIT_REMEMBERED_SET},
+  { REG(rbx), REG(rdx), REG(rcx), EMIT_REMEMBERED_SET},
   // KeyedStoreStubCompiler::GenerateStoreFastElement.
-  { rdi, rbx, rcx, EMIT_REMEMBERED_SET},
-  { rdx, rdi, rbx, EMIT_REMEMBERED_SET},
+  { REG(rdi), REG(rbx), REG(rcx), EMIT_REMEMBERED_SET},
+  { REG(rdx), REG(rdi), REG(rbx), EMIT_REMEMBERED_SET},
   // ElementsTransitionGenerator::GenerateSmiOnlyToObject
   // and ElementsTransitionGenerator::GenerateSmiOnlyToObject
   // and ElementsTransitionGenerator::GenerateDoubleToObject
-  { rdx, rbx, rdi, EMIT_REMEMBERED_SET},
-  { rdx, rbx, rdi, OMIT_REMEMBERED_SET},
+  { REG(rdx), REG(rbx), REG(rdi), EMIT_REMEMBERED_SET},
+  { REG(rdx), REG(rbx), REG(rdi), OMIT_REMEMBERED_SET},
   // ElementsTransitionGenerator::GenerateSmiOnlyToDouble
   // and ElementsTransitionGenerator::GenerateDoubleToObject
-  { rdx, r11, r15, EMIT_REMEMBERED_SET},
+  { REG(rdx), REG(r11), REG(r15), EMIT_REMEMBERED_SET},
   // ElementsTransitionGenerator::GenerateDoubleToObject
-  { r11, rax, r15, EMIT_REMEMBERED_SET},
+  { REG(r11), REG(rax), REG(r15), EMIT_REMEMBERED_SET},
   // StoreArrayLiteralElementStub::Generate
-  { rbx, rax, rcx, EMIT_REMEMBERED_SET},
+  { REG(rbx), REG(rax), REG(rcx), EMIT_REMEMBERED_SET},
   // Null termination.
-  { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
+  { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
 };
 
+#undef REG
 
 bool RecordWriteStub::IsPregenerated() {
   for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
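
The REG macro exists, presumably, so that the kAheadOfTime table gets constant
initialization: building each Register aggregate directly from the new
kRegister_*_Code constants avoids copying from the const Register globals,
which would require a dynamic initializer. For example:

    // REG(rbx) expands to:
    { kRegister_rbx_Code }  // an aggregate Register with code 3
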
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index 2adf587..40b9a1c 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -111,13 +111,21 @@
 }
 
 
+static const byte kJnsInstruction = 0x79;
+static const byte kJnsOffset = 0x1f;
+static const byte kJaeInstruction = 0x73;
+static const byte kJaeOffset = 0x07;
+static const byte kCallInstruction = 0xe8;
+static const byte kNopByteOne = 0x66;
+static const byte kNopByteTwo = 0x90;
+
 void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
                                         Address pc_after,
                                         Code* check_code,
                                         Code* replacement_code) {
   Address call_target_address = pc_after - kIntSize;
-  ASSERT(check_code->entry() ==
-         Assembler::target_address_at(call_target_address));
+  ASSERT_EQ(check_code->entry(),
+            Assembler::target_address_at(call_target_address));
   // The stack check code matches the pattern:
   //
   //     cmp rsp, <limit>
@@ -135,11 +143,16 @@
   //     test rax, <loop nesting depth>
   // ok:
   //
-  ASSERT(*(call_target_address - 3) == 0x73 &&  // jae
-         *(call_target_address - 2) == 0x07 &&  // offset
-         *(call_target_address - 1) == 0xe8);   // call
-  *(call_target_address - 3) = 0x66;  // 2 byte nop part 1
-  *(call_target_address - 2) = 0x90;  // 2 byte nop part 2
+  if (FLAG_count_based_interrupts) {
+    ASSERT_EQ(kJnsInstruction,       *(call_target_address - 3));
+    ASSERT_EQ(kJnsOffset,            *(call_target_address - 2));
+  } else {
+    ASSERT_EQ(kJaeInstruction,       *(call_target_address - 3));
+    ASSERT_EQ(kJaeOffset,            *(call_target_address - 2));
+  }
+  ASSERT_EQ(kCallInstruction,        *(call_target_address - 1));
+  *(call_target_address - 3) = kNopByteOne;
+  *(call_target_address - 2) = kNopByteTwo;
   Assembler::set_target_address_at(call_target_address,
                                    replacement_code->entry());
 
@@ -157,11 +170,16 @@
          Assembler::target_address_at(call_target_address));
   // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
   // restore the conditional branch.
-  ASSERT(*(call_target_address - 3) == 0x66 &&  // 2 byte nop part 1
-         *(call_target_address - 2) == 0x90 &&  // 2 byte nop part 2
-         *(call_target_address - 1) == 0xe8);   // call
-  *(call_target_address - 3) = 0x73;  // jae
-  *(call_target_address - 2) = 0x07;  // offset
+  ASSERT_EQ(kNopByteOne,      *(call_target_address - 3));
+  ASSERT_EQ(kNopByteTwo,      *(call_target_address - 2));
+  ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+  if (FLAG_count_based_interrupts) {
+    *(call_target_address - 3) = kJnsInstruction;
+    *(call_target_address - 2) = kJnsOffset;
+  } else {
+    *(call_target_address - 3) = kJaeInstruction;
+    *(call_target_address - 2) = kJaeOffset;
+  }
   Assembler::set_target_address_at(call_target_address,
                                    check_code->entry());
 
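A byte-level view of the patch site may help (layout inferred from the asserts
above; <imm32> is the call's relative target):

    // before: 79 1f e8 <imm32>   jns ok; call check_code   (count-based)
    //     or: 73 07 e8 <imm32>   jae ok; call check_code   (stack limit)
    // after:  66 90 e8 <imm32>   2-byte nop; call replacement_code
    //
    // Patching overwrites the two-byte conditional jump with a two-byte nop
    // so the call is always taken; RevertStackCheckCodeAt restores whichever
    // jump matches the active interrupt mechanism.
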
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 5cbdad7..adeda0b 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -34,6 +34,7 @@
 #if defined(V8_TARGET_ARCH_X64)
 
 #include "disasm.h"
+#include "lazy-instance.h"
 
 namespace disasm {
 
@@ -269,7 +270,8 @@
 }
 
 
-static InstructionTable instruction_table;
+static v8::internal::LazyInstance<InstructionTable>::type instruction_table =
+    LAZY_INSTANCE_INITIALIZER;
 
 
 static InstructionDesc cmov_instructions[16] = {
@@ -1338,7 +1340,7 @@
     data++;
   }
 
-  const InstructionDesc& idesc = instruction_table.Get(current);
+  const InstructionDesc& idesc = instruction_table.Get().Get(current);
   byte_size_operand_ = idesc.byte_size_operation;
   switch (idesc.type) {
     case ZERO_OPERANDS_INSTR:
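
Usage sketch for the LazyInstance conversion above: the table is now a POD
with no static constructor and is default-constructed on the first Get()
(Lookup is a hypothetical caller):

    static v8::internal::LazyInstance<InstructionTable>::type instruction_table
        = LAZY_INSTANCE_INITIALIZER;

    const InstructionDesc& Lookup(byte opcode) {
      // Get() lazily constructs the InstructionTable, then returns a reference.
      return instruction_table.Get().Get(opcode);
    }
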
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 85c5e75..4138a16 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -34,6 +34,7 @@
 #include "compiler.h"
 #include "debug.h"
 #include "full-codegen.h"
+#include "isolate-inl.h"
 #include "parser.h"
 #include "scopes.h"
 #include "stub-cache.h"
@@ -100,11 +101,6 @@
 };
 
 
-int FullCodeGenerator::self_optimization_header_size() {
-  return 20;
-}
-
-
 // Generate code for a JS function.  On entry to the function the receiver
 // and arguments have been pushed on the stack left to right, with the
 // return address on top of them.  The actual argument count matches the
@@ -122,32 +118,11 @@
   CompilationInfo* info = info_;
   handler_table_ =
       isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+  profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
+      Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
   SetFunctionPosition(function());
   Comment cmnt(masm_, "[ function compiled by full code generator");
 
-  // We can optionally optimize based on counters rather than statistical
-  // sampling.
-  if (info->ShouldSelfOptimize()) {
-    if (FLAG_trace_opt_verbose) {
-      PrintF("[adding self-optimization header to %s]\n",
-             *info->function()->debug_name()->ToCString());
-    }
-    has_self_optimization_header_ = true;
-    MaybeObject* maybe_cell = isolate()->heap()->AllocateJSGlobalPropertyCell(
-        Smi::FromInt(Compiler::kCallsUntilPrimitiveOpt));
-    JSGlobalPropertyCell* cell;
-    if (maybe_cell->To(&cell)) {
-      __ movq(rax, Handle<JSGlobalPropertyCell>(cell),
-              RelocInfo::EMBEDDED_OBJECT);
-      __ SmiAddConstant(FieldOperand(rax, JSGlobalPropertyCell::kValueOffset),
-                        Smi::FromInt(-1));
-      Handle<Code> compile_stub(
-          isolate()->builtins()->builtin(Builtins::kLazyRecompile));
-      __ j(zero, compile_stub, RelocInfo::CODE_TARGET);
-      ASSERT(masm_->pc_offset() == self_optimization_header_size());
-    }
-  }
-
 #ifdef DEBUG
   if (strlen(FLAG_stop_at) > 0 &&
       info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -322,14 +297,60 @@
 }
 
 
+void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
+  __ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
+  __ SmiAddConstant(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
+                    Smi::FromInt(-delta));
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterReset() {
+  int reset_value = FLAG_interrupt_budget;
+  if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
+    // Self-optimization is a one-off thing; if it fails, don't try again.
+    reset_value = Smi::kMaxValue;
+  }
+  if (isolate()->IsDebuggerActive()) {
+    // Detect debug break requests as soon as possible.
+    reset_value = 10;
+  }
+  __ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
+  __ movq(kScratchRegister,
+          reinterpret_cast<uint64_t>(Smi::FromInt(reset_value)),
+          RelocInfo::NONE);
+  __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
+          kScratchRegister);
+}
+
+
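+// A back edge's weight is its code distance divided by kBackEdgeDistanceDivisor,
+// clamped to kMaxBackEdgeWeight.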
+static const int kMaxBackEdgeWeight = 127;
+static const int kBackEdgeDistanceDivisor = 162;
+
+
 void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
                                        Label* back_edge_target) {
   Comment cmnt(masm_, "[ Stack check");
   Label ok;
-  __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
-  __ j(above_equal, &ok, Label::kNear);
-  StackCheckStub stub;
-  __ CallStub(&stub);
+
+  if (FLAG_count_based_interrupts) {
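+    // Count-based interrupts: decrement the profiling counter instead of
+    // polling the stack limit; longer loop bodies get a larger weight.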
+    int weight = 1;
+    if (FLAG_weighted_back_edges) {
+      ASSERT(back_edge_target->is_bound());
+      int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+      weight = Min(kMaxBackEdgeWeight,
+                   Max(1, distance / kBackEdgeDistanceDivisor));
+    }
+    EmitProfilingCounterDecrement(weight);
+    __ j(positive, &ok, Label::kNear);
+    InterruptStub stub;
+    __ CallStub(&stub);
+  } else {
+    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+    __ j(above_equal, &ok, Label::kNear);
+    StackCheckStub stub;
+    __ CallStub(&stub);
+  }
+
   // Record a mapping of this PC offset to the OSR id.  This is used to find
   // the AST id from the unoptimized code in order to use it as a key into
   // the deoptimization input data found in the optimized code.
@@ -342,6 +363,10 @@
   ASSERT(loop_depth() > 0);
   __ testl(rax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
 
+  if (FLAG_count_based_interrupts) {
+    EmitProfilingCounterReset();
+  }
+
   __ bind(&ok);
   PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
   // Record a mapping of the OSR id to this PC.  This is used if the OSR
@@ -361,6 +386,31 @@
       __ push(rax);
       __ CallRuntime(Runtime::kTraceExit, 1);
     }
+    if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
+      // Pretend that the exit is a backwards jump to the entry.
+      int weight = 1;
+      if (info_->ShouldSelfOptimize()) {
+        weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+      } else if (FLAG_weighted_back_edges) {
+        int distance = masm_->pc_offset();
+        weight = Min(kMaxBackEdgeWeight,
+                     Max(1, distance / kBackEdgeDistanceDivisor));
+      }
+      EmitProfilingCounterDecrement(weight);
+      Label ok;
+      __ j(positive, &ok, Label::kNear);
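+      // Preserve the return value in rax across the stub/runtime call.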
+      __ push(rax);
+      if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
+        __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+        __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
+      } else {
+        InterruptStub stub;
+        __ CallStub(&stub);
+      }
+      __ pop(rax);
+      EmitProfilingCounterReset();
+      __ bind(&ok);
+    }
 #ifdef DEBUG
     // Add a label for checking the size of the code used for returning.
     Label check_exit_codesize;
@@ -856,7 +906,7 @@
     // Record position before stub call for type feedback.
     SetSourcePosition(clause->position());
     Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
-    __ call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+    CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
     patch_site.EmitPatchInfo();
 
     __ testq(rax, rax);
@@ -1155,7 +1205,7 @@
   RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
       ? RelocInfo::CODE_TARGET
       : RelocInfo::CODE_TARGET_CONTEXT;
-  __ call(ic, mode);
+  CallIC(ic, mode);
 }
 
 
@@ -1236,7 +1286,7 @@
       __ Move(rcx, var->name());
       __ movq(rax, GlobalObjectOperand());
       Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-      __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+      CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
       context()->Plug(rax);
       break;
     }
@@ -1446,7 +1496,7 @@
             Handle<Code> ic = is_classic_mode()
                 ? isolate()->builtins()->StoreIC_Initialize()
                 : isolate()->builtins()->StoreIC_Initialize_Strict();
-            __ call(ic, RelocInfo::CODE_TARGET, key->id());
+            CallIC(ic, RelocInfo::CODE_TARGET, key->id());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
           } else {
             VisitForEffect(value);
@@ -1716,14 +1766,14 @@
   Literal* key = prop->key()->AsLiteral();
   __ Move(rcx, key->handle());
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  __ call(ic, RelocInfo::CODE_TARGET, prop->id());
+  CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  __ call(ic, RelocInfo::CODE_TARGET, prop->id());
+  CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
 }
 
 
@@ -1745,7 +1795,7 @@
   __ bind(&stub_call);
   __ movq(rax, rcx);
   BinaryOpStub stub(op, mode);
-  __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
   patch_site.EmitPatchInfo();
   __ jmp(&done, Label::kNear);
 
@@ -1794,7 +1844,7 @@
   __ pop(rdx);
   BinaryOpStub stub(op, mode);
   JumpPatchSite patch_site(masm_);    // unbound, signals no inlined smi code.
-  __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
   patch_site.EmitPatchInfo();
   context()->Plug(rax);
 }
@@ -1835,7 +1885,7 @@
       Handle<Code> ic = is_classic_mode()
           ? isolate()->builtins()->StoreIC_Initialize()
           : isolate()->builtins()->StoreIC_Initialize_Strict();
-      __ call(ic);
+      CallIC(ic);
       break;
     }
     case KEYED_PROPERTY: {
@@ -1848,7 +1898,7 @@
       Handle<Code> ic = is_classic_mode()
           ? isolate()->builtins()->KeyedStoreIC_Initialize()
           : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
-      __ call(ic);
+      CallIC(ic);
       break;
     }
   }
@@ -1865,7 +1915,7 @@
     Handle<Code> ic = is_classic_mode()
         ? isolate()->builtins()->StoreIC_Initialize()
         : isolate()->builtins()->StoreIC_Initialize_Strict();
-    __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+    CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
   } else if (op == Token::INIT_CONST) {
     // Const initializers need a write barrier.
     ASSERT(!var->IsParameter());  // No const parameters.
@@ -1973,7 +2023,7 @@
   Handle<Code> ic = is_classic_mode()
       ? isolate()->builtins()->StoreIC_Initialize()
       : isolate()->builtins()->StoreIC_Initialize_Strict();
-  __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+  CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -2013,7 +2063,7 @@
   Handle<Code> ic = is_classic_mode()
       ? isolate()->builtins()->KeyedStoreIC_Initialize()
       : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
-  __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+  CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -2047,6 +2097,14 @@
 }
 
 
+void FullCodeGenerator::CallIC(Handle<Code> code,
+                               RelocInfo::Mode rmode,
+                               unsigned ast_id) {
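+  // All IC call sites funnel through here so the calls can be counted.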
+  ic_total_count_++;
+  __ call(code, rmode, ast_id);
+}
+
+
 void FullCodeGenerator::EmitCallWithIC(Call* expr,
                                        Handle<Object> name,
                                        RelocInfo::Mode mode) {
@@ -2064,7 +2122,7 @@
   // Call the IC initialization code.
   Handle<Code> ic =
       isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
-  __ call(ic, mode, expr->id());
+  CallIC(ic, mode, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2097,7 +2155,7 @@
   Handle<Code> ic =
       isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
   __ movq(rcx, Operand(rsp, (arg_count + 1) * kPointerSize));  // Key.
-  __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+  CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -3737,7 +3795,7 @@
     RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
     Handle<Code> ic =
         isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
-    __ call(ic, mode, expr->id());
+    CallIC(ic, mode, expr->id());
     // Restore context register.
     __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   } else {
@@ -3895,7 +3953,7 @@
   // accumulator register rax.
   VisitForAccumulatorValue(expr->expression());
   SetSourcePosition(expr->position());
-  __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
   context()->Plug(rax);
 }
 
@@ -4016,7 +4074,7 @@
     __ movq(rdx, rax);
     __ Move(rax, Smi::FromInt(1));
   }
-  __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
   patch_site.EmitPatchInfo();
   __ bind(&done);
 
@@ -4050,7 +4108,7 @@
       Handle<Code> ic = is_classic_mode()
           ? isolate()->builtins()->StoreIC_Initialize()
           : isolate()->builtins()->StoreIC_Initialize_Strict();
-      __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+      CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -4067,7 +4125,7 @@
       Handle<Code> ic = is_classic_mode()
           ? isolate()->builtins()->KeyedStoreIC_Initialize()
           : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
-      __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+      CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -4094,7 +4152,7 @@
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
-    __ call(ic);
+    CallIC(ic);
     PrepareForBailout(expr, TOS_REG);
     context()->Plug(rax);
   } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
@@ -4274,7 +4332,7 @@
       // Record position and call the compare IC.
       SetSourcePosition(expr->position());
       Handle<Code> ic = CompareIC::GetUninitialized(op);
-      __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+      CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
       patch_site.EmitPatchInfo();
 
       PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 2ba2c57..1f5ec4b 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -3953,12 +3953,21 @@
 }
 
 
-void LCodeGen::DoCheckMap(LCheckMap* instr) {
+void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister());
   Register reg = ToRegister(input);
-  Handle<Map> map = instr->hydrogen()->map();
-  DoCheckMapCommon(reg, map, instr->hydrogen()->mode(), instr->environment());
+
+  Label success;
+  SmallMapList* map_set = instr->hydrogen()->map_set();
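+  // Branch to success on any map but the last; the final map is checked by
+  // DoCheckMapCommon, which deoptimizes on mismatch.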
+  for (int i = 0; i < map_set->length() - 1; i++) {
+    Handle<Map> map = map_set->at(i);
+    __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP);
+    __ j(equal, &success);
+  }
+  Handle<Map> map = map_set->last();
+  DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment());
+  __ bind(&success);
 }
 
 
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index d3e4cdd..7c47df6 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -1737,9 +1737,9 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
+LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  LCheckMap* result = new(zone()) LCheckMap(value);
+  LCheckMaps* result = new(zone()) LCheckMaps(value);
   return AssignEnvironment(result);
 }
 
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index 2d8fd2e..99f28f0 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -71,7 +71,7 @@
   V(CallStub)                                   \
   V(CheckFunction)                              \
   V(CheckInstanceType)                          \
-  V(CheckMap)                                   \
+  V(CheckMaps)                                  \
   V(CheckNonSmi)                                \
   V(CheckPrototypeMaps)                         \
   V(CheckSmi)                                   \
@@ -1857,14 +1857,14 @@
 };
 
 
-class LCheckMap: public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
  public:
-  explicit LCheckMap(LOperand* value) {
+  explicit LCheckMaps(LOperand* value) {
     inputs_[0] = value;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
-  DECLARE_HYDROGEN_ACCESSOR(CheckMap)
+  DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
+  DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
 };
 
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 2118886..f7db250 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -799,8 +799,15 @@
 }
 
 
-static const Register saved_regs[] =
-    { rax, rcx, rdx, rbx, rbp, rsi, rdi, r8, r9, r10, r11 };
+#define REG(Name) { kRegister_ ## Name ## _Code }
+
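+// Aggregate-initialize each Register from its code constant.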
+static const Register saved_regs[] = {
+  REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
+  REG(r9), REG(r10), REG(r11)
+};
+
+#undef REG
+
 static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
 
 
@@ -2418,7 +2425,8 @@
 
 // Order general registers are pushed by Pushad:
 // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
-int MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
+const int
+MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
     0,
     1,
     2,
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 86eb312..6bb5cfe 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -1307,7 +1307,7 @@
  private:
   // Order general registers are pushed by Pushad.
   // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
-  static int kSafepointPushRegisterIndices[Register::kNumRegisters];
+  static const int kSafepointPushRegisterIndices[Register::kNumRegisters];
   static const int kNumSafepointSavedRegisters = 11;
   static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
 
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 837c254..a66bde8 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -572,6 +572,42 @@
 }
 
 
+void RegExpMacroAssemblerX64::CheckCharacterInRange(
+    uc16 from,
+    uc16 to,
+    Label* on_in_range) {
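+  // Unsigned-compare trick: (c - from) <= (to - from) iff from <= c <= to.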
+  __ leal(rax, Operand(current_character(), -from));
+  __ cmpl(rax, Immediate(to - from));
+  BranchOrBacktrack(below_equal, on_in_range);
+}
+
+
+void RegExpMacroAssemblerX64::CheckCharacterNotInRange(
+    uc16 from,
+    uc16 to,
+    Label* on_not_in_range) {
+  __ leal(rax, Operand(current_character(), -from));
+  __ cmpl(rax, Immediate(to - from));
+  BranchOrBacktrack(above, on_not_in_range);
+}
+
+
+void RegExpMacroAssemblerX64::CheckBitInTable(
+    Handle<ByteArray> table,
+    Label* on_bit_set) {
+  __ Move(rax, table);
+  Register index = current_character();
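+  // Mask the character when it can exceed the table's index range.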
+  if (mode_ != ASCII || kTableMask != String::kMaxAsciiCharCode) {
+    __ movq(rbx, current_character());
+    __ and_(rbx, Immediate(kTableMask));
+    index = rbx;
+  }
+  __ cmpb(FieldOperand(rax, index, times_1, ByteArray::kHeaderSize),
+          Immediate(0));
+  BranchOrBacktrack(not_equal, on_bit_set);
+}
+
+
 bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
                                                          Label* on_no_match) {
   // Range checks (c in min..max) are generally implemented by an unsigned
diff --git a/src/x64/regexp-macro-assembler-x64.h b/src/x64/regexp-macro-assembler-x64.h
index 7102225..cd24b60 100644
--- a/src/x64/regexp-macro-assembler-x64.h
+++ b/src/x64/regexp-macro-assembler-x64.h
@@ -75,6 +75,14 @@
                                               uc16 minus,
                                               uc16 mask,
                                               Label* on_not_equal);
+  virtual void CheckCharacterInRange(uc16 from,
+                                     uc16 to,
+                                     Label* on_in_range);
+  virtual void CheckCharacterNotInRange(uc16 from,
+                                        uc16 to,
+                                        Label* on_not_in_range);
+  virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
+
   // Checks whether the given offset from the current position is before
   // the end of the string.
   virtual void CheckPosition(int cp_offset, Label* on_outside_input);