Push version 1.3.11 to trunk.

Fixed crash in error reporting during bootstrapping.
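
A condensed sketch of the guard added in src/top.cc (see the hunk below): message
objects are only constructed once bootstrapping has finished, so error reporting
no longer touches builtins that do not exist yet. Not the literal committed code:

    if (!Bootstrapper::IsActive()) {
      // Safe to build the message object: the builtins and maps the
      // message machinery needs have been set up by the bootstrapper.
      message = MessageHandler::MakeMessageObject(
          "uncaught_exception", location,
          HandleVector<Object>(&exception, 1), stack_trace);
    }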

Optimized generated IA32 math code by using SSE2 instructions when available.
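
The binary-op and compare stubs in src/ia32/codegen-ia32.cc now pick SSE2 when the
CPU supports it and fall back to the x87 FPU path otherwise. Roughly (condensed
from the hunks below, not the exact emitted sequence):

    if (CpuFeatures::IsSupported(CpuFeatures::SSE2)) {
      CpuFeatures::Scope use_sse2(CpuFeatures::SSE2);
      FloatingPointHelper::LoadSse2Operands(masm, &call_runtime);
      __ addsd(xmm0, xmm1);  // or subsd/mulsd/divsd, depending on op_
      __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
    } else {
      // Existing x87 FPU path.
      FloatingPointHelper::LoadFloatOperands(masm, ecx);
      __ faddp(1);
      __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
    }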

Implemented missing pieces of debugger infrastructure on ARM.  The debugger is now fully functional on ARM.
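
Break points at a JS return now work by patching the four-instruction return
sequence with a call to the debug break return entry, using the new ARM
CodePatcher (condensed from src/arm/debug-arm.cc below):

    CodePatcher patcher(rinfo()->pc(), 4);
    patcher.masm()->mov(lr, pc);                  // mov lr, pc
    patcher.masm()->ldr(pc, MemOperand(pc, -4));  // ldr pc, [pc, #-4]
    patcher.Emit(Debug::debug_break_return()->entry());
    patcher.masm()->bkpt(0);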

Made 'hidden' the default visibility for gcc.
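
The corresponding SConstruct options can be set on the scons command line;
illustrative invocations (option names and values are defined in SConstruct below):

    scons visibility=default        # restore the old default symbol visibility
    scons debuggersupport=off profilingsupport=off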




git-svn-id: http://v8.googlecode.com/svn/trunk@2891 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index a78755b..009296c 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,16 @@
+2009-09-15: Version 1.3.11
+
+        Fixed crash in error reporting during bootstrapping.
+
+        Optimized generated IA32 math code by using SSE2 instructions when
+        available.
+
+        Implemented missing pieces of debugger infrastructure on ARM.  The
+        debugger is now fully functional on ARM.
+
+        Made 'hidden' the default visibility for gcc.
+
+
 2009-09-09: Version 1.3.10
 
         Fixed profiler on Mac in 64-bit mode.
diff --git a/SConstruct b/SConstruct
index ddd0190..b5aa7ab 100644
--- a/SConstruct
+++ b/SConstruct
@@ -96,13 +96,18 @@
 
 LIBRARY_FLAGS = {
   'all': {
-    'CPPDEFINES':   ['ENABLE_LOGGING_AND_PROFILING'],
     'CPPPATH': [join(root_dir, 'src')],
     'regexp:native': {
         'CPPDEFINES': ['V8_NATIVE_REGEXP']
     },
     'mode:debug': {
       'CPPDEFINES': ['V8_ENABLE_CHECKS']
+    },
+    'profilingsupport:on': {
+      'CPPDEFINES':   ['ENABLE_LOGGING_AND_PROFILING'],
+    },
+    'debuggersupport:on': {
+      'CPPDEFINES':   ['ENABLE_DEBUGGER_SUPPORT'],
     }
   },
   'gcc': {
@@ -110,11 +115,14 @@
       'CCFLAGS':      ['$DIALECTFLAGS', '$WARNINGFLAGS'],
       'CXXFLAGS':     ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
     },
+    'visibility:hidden': {
+      # Use visibility=default to disable this.
+      'CXXFLAGS':     ['-fvisibility=hidden']
+    },
     'mode:debug': {
       'CCFLAGS':      ['-g', '-O0'],
       'CPPDEFINES':   ['ENABLE_DISASSEMBLER', 'DEBUG'],
       'os:android': {
-        'CPPDEFINES': ['ENABLE_DEBUGGER_SUPPORT'],
         'CCFLAGS':    ['-mthumb']
       }
     },
@@ -123,7 +131,7 @@
                        '-ffunction-sections'],
       'os:android': {
         'CCFLAGS':    ['-mthumb', '-Os'],
-        'CPPDEFINES': ['SK_RELEASE', 'NDEBUG', 'ENABLE_DEBUGGER_SUPPORT']
+        'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
       }
     },
     'os:linux': {
@@ -229,7 +237,6 @@
 V8_EXTRA_FLAGS = {
   'gcc': {
     'all': {
-      'CXXFLAGS':     [], #['-fvisibility=hidden'],
       'WARNINGFLAGS': ['-Wall',
                        '-Werror',
                        '-W',
@@ -576,6 +583,16 @@
     'default': 'static',
     'help': 'the type of library to produce'
   },
+  'profilingsupport': {
+    'values': ['on', 'off'],
+    'default': 'on',
+    'help': 'enable profiling of JavaScript code'
+  },
+  'debuggersupport': {
+    'values': ['on', 'off'],
+    'default': 'on',
+    'help': 'enable debugging of JavaScript code'
+  },
   'soname': {
     'values': ['on', 'off'],
     'default': 'off',
@@ -615,6 +632,11 @@
     'values': ['on', 'off'],
     'default': 'off',
     'help': 'more output from compiler and linker'
+  },
+  'visibility': {
+    'values': ['default', 'hidden'],
+    'default': 'hidden',
+    'help': 'shared library symbol visibility'
   }
 }
 
@@ -794,6 +816,10 @@
       # Print a warning if arch has explicitly been set
       print "Warning: forcing architecture to match simulator (%s)" % options['simulator']
     options['arch'] = options['simulator']
+  if (options['prof'] != 'off') and (options['profilingsupport'] == 'off'):
+    # Print a warning if profiling is enabled without profiling support
+    print "Warning: forcing profilingsupport on when prof is on"
+    options['profilingsupport'] = 'on'
 
 
 def ParseEnvOverrides(arg, imports):
diff --git a/include/v8.h b/include/v8.h
index 2789bad..24dc6d1 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -2725,9 +2725,9 @@
 
   // These constants are compiler dependent so their values must be
   // defined within the implementation.
-  static int kJSObjectType;
-  static int kFirstNonstringType;
-  static int kProxyType;
+  V8EXPORT static int kJSObjectType;
+  V8EXPORT static int kFirstNonstringType;
+  V8EXPORT static int kProxyType;
 
   static inline bool HasHeapObjectTag(internal::Object* value) {
     return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
diff --git a/src/api.cc b/src/api.cc
index 1128d3e..052e875 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -2672,9 +2672,7 @@
   }
   // Leave V8.
 
-  if (!ApiCheck(!env.is_null(),
-                "v8::Context::New()",
-                "Could not initialize environment"))
+  if (env.is_null())
     return Persistent<Context>();
   return Persistent<Context>(Utils::ToLocal(env));
 }
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index cb5faa2..cd5a1bb 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -105,40 +105,45 @@
 
 Address RelocInfo::call_address() {
   ASSERT(IsCallInstruction());
-  UNIMPLEMENTED();
-  return NULL;
+  // The 2-instruction offset assumes a patched return sequence.
+  ASSERT(IsJSReturn(rmode()));
+  return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
 }
 
 
 void RelocInfo::set_call_address(Address target) {
   ASSERT(IsCallInstruction());
-  UNIMPLEMENTED();
+  // The 2-instruction offset assumes a patched return sequence.
+  ASSERT(IsJSReturn(rmode()));
+  Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
 }
 
 
 Object* RelocInfo::call_object() {
-  ASSERT(IsCallInstruction());
-  UNIMPLEMENTED();
-  return NULL;
+  return *call_object_address();
 }
 
 
 Object** RelocInfo::call_object_address() {
   ASSERT(IsCallInstruction());
-  UNIMPLEMENTED();
-  return NULL;
+  // The 2-instruction offset assumes a patched return sequence.
+  ASSERT(IsJSReturn(rmode()));
+  return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
 }
 
 
 void RelocInfo::set_call_object(Object* target) {
-  ASSERT(IsCallInstruction());
-  UNIMPLEMENTED();
+  *call_object_address() = target;
 }
 
 
 bool RelocInfo::IsCallInstruction() {
-  UNIMPLEMENTED();
-  return false;
+  // On ARM a "call instruction" is actually two instructions.
+  //   mov lr, pc
+  //   ldr pc, [pc, #XXX]
+  return (Assembler::instr_at(pc_) == kMovLrPc)
+          && ((Assembler::instr_at(pc_ + Assembler::kInstrSize) & kLdrPCPattern)
+              == kLdrPCPattern);
 }
 
 
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 8bd06db..bc3b8e6 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -93,7 +93,14 @@
 
 void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
   // Patch the code at the current address with the supplied instructions.
-  UNIMPLEMENTED();
+  Instr* pc = reinterpret_cast<Instr*>(pc_);
+  Instr* instr = reinterpret_cast<Instr*>(instructions);
+  for (int i = 0; i < instruction_count; i++) {
+    *(pc + i) = *(instr + i);
+  }
+
+  // Indicate that code has changed.
+  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
 }
 
 
@@ -232,6 +239,10 @@
 // register r is not encoded.
 static const Instr kPopRegPattern =
     al | B26 | L | 4 | PostIndex | sp.code() * B16;
+// mov lr, pc
+const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
+// ldr pc, [pc, #XXX]
+const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16;
 
 // spare_buffer_
 static const int kMinimalBufferSize = 4*KB;
@@ -1301,6 +1312,13 @@
 
 
 // Debugging
+void Assembler::RecordJSReturn() {
+  WriteRecordedPositions();
+  CheckBuffer();
+  RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
 void Assembler::RecordComment(const char* msg) {
   if (FLAG_debug_code) {
     CheckBuffer();
@@ -1387,16 +1405,20 @@
     RelocInfo& rinfo = prinfo_[i];
     ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
            rinfo.rmode() != RelocInfo::POSITION);
-    rinfo.set_pc(rinfo.pc() + pc_delta);
+    if (rinfo.rmode() != RelocInfo::JS_RETURN) {
+      rinfo.set_pc(rinfo.pc() + pc_delta);
+    }
   }
 }
 
 
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
   RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
-  if (rmode >= RelocInfo::COMMENT && rmode <= RelocInfo::STATEMENT_POSITION) {
-    // adjust code for new modes
-    ASSERT(RelocInfo::IsComment(rmode) || RelocInfo::IsPosition(rmode));
+  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
+    // Adjust code for new modes
+    ASSERT(RelocInfo::IsJSReturn(rmode)
+           || RelocInfo::IsComment(rmode)
+           || RelocInfo::IsPosition(rmode));
     // these modes do not need an entry in the constant pool
   } else {
     ASSERT(num_prinfo_ < kMaxNumPRInfo);
@@ -1490,6 +1512,7 @@
            rinfo.rmode() != RelocInfo::POSITION &&
            rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
     Instr instr = instr_at(rinfo.pc());
+
     // Instruction to patch must be a ldr/str [pc, #offset]
     // P and U set, B and W clear, Rn == pc, offset12 still 0
     ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 63f0447..7e43f2e 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -376,6 +376,10 @@
 typedef int32_t Instr;
 
 
+extern const Instr kMovLrPc;
+extern const Instr kLdrPCPattern;
+
+
 class Assembler : public Malloced {
  public:
   // Create an assembler. Instructions and relocation information are emitted
@@ -433,12 +437,16 @@
   INLINE(static Address target_address_at(Address pc));
   INLINE(static void set_target_address_at(Address pc, Address target));
 
+  // Size of an instruction.
+  static const int kInstrSize = sizeof(Instr);
+
   // Distance between the instruction referring to the address of the call
   // target (ldr pc, [target addr in const pool]) and the return address
-  static const int kPatchReturnSequenceLength = sizeof(Instr);
+  static const int kCallTargetAddressOffset = kInstrSize;
+
   // Distance between start of patched return sequence and the emitted address
   // to jump to.
-  static const int kPatchReturnSequenceAddressOffset = 1;
+  static const int kPatchReturnSequenceAddressOffset = kInstrSize;
 
   // Difference between address of current opcode and value read from pc
   // register.
@@ -652,9 +660,16 @@
   // Jump unconditionally to given label.
   void jmp(Label* L) { b(L, al); }
 
+  // Check the code size generated from label to here.
+  int InstructionsGeneratedSince(Label* l) {
+    return (pc_offset() - l->pos()) / kInstrSize;
+  }
 
   // Debugging
 
+  // Mark address of the ExitJSFrame code.
+  void RecordJSReturn();
+
   // Record a comment relocation entry that can be used by a disassembler.
   // Use --debug_code to enable.
   void RecordComment(const char* msg);
@@ -671,7 +686,7 @@
   int buffer_space() const { return reloc_info_writer.pos() - pc_; }
 
   // Read/patch instructions
-  Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
+  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
   void instr_at_put(byte* pc, Instr instr) {
     *reinterpret_cast<Instr*>(pc) = instr;
   }
@@ -708,7 +723,6 @@
   int next_buffer_check_;  // pc offset of next buffer check
 
   // Code generation
-  static const int kInstrSize = sizeof(Instr);  // signed size
   // The relocation writer's position is at least kGap bytes below the end of
   // the generated instructions. This is so that multi-instruction sequences do
   // not have to check for overflow. The same is true for writes of large
@@ -795,6 +809,8 @@
   void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
 
   friend class RegExpMacroAssemblerARM;
+  friend class RelocInfo;
+  friend class CodePatcher;
 };
 
 } }  // namespace v8::internal
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 7b3662d..9ef879a 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -299,7 +299,10 @@
   }
 
   // Generate the return sequence if necessary.
-  if (frame_ != NULL || function_return_.is_linked()) {
+  if (has_valid_frame() || function_return_.is_linked()) {
+    if (!function_return_.is_linked()) {
+      CodeForReturnPosition(fun);
+    }
     // exit
     // r0: result
     // sp: stack pointer
@@ -315,12 +318,23 @@
       frame_->CallRuntime(Runtime::kTraceExit, 1);
     }
 
+    // Add a label for checking the size of the code used for returning.
+    Label check_exit_codesize;
+    masm_->bind(&check_exit_codesize);
+
     // Tear down the frame which will restore the caller's frame pointer and
     // the link register.
     frame_->Exit();
 
-    __ add(sp, sp, Operand((scope_->num_parameters() + 1) * kPointerSize));
-    __ Jump(lr);
+    // Here we use masm_-> instead of the __ macro to prevent the code
+    // coverage tool from instrumenting this sequence, as we rely on its size.
+    masm_->add(sp, sp, Operand((scope_->num_parameters() + 1) * kPointerSize));
+    masm_->Jump(lr);
+
+    // Check that the size of the code used for returning matches what is
+    // expected by the debugger.
+    ASSERT_EQ(kJSReturnSequenceLength,
+              masm_->InstructionsGeneratedSince(&check_exit_codesize));
   }
 
   // Code generation state must be reset.
@@ -1111,10 +1125,10 @@
   if (FLAG_check_stack) {
     Comment cmnt(masm_, "[ check stack");
     __ LoadRoot(ip, Heap::kStackLimitRootIndex);
-    // Put the lr setup instruction in the delay slot.  The 'sizeof(Instr)' is
-    // added to the implicit 8 byte offset that always applies to operations
-    // with pc and gives a return address 12 bytes down.
-    masm_->add(lr, pc, Operand(sizeof(Instr)));
+    // Put the lr setup instruction in the delay slot.  kInstrSize is added to
+    // the implicit 8 byte offset that always applies to operations with pc and
+    // gives a return address 12 bytes down.
+    masm_->add(lr, pc, Operand(Assembler::kInstrSize));
     masm_->cmp(sp, Operand(ip));
     StackCheckStub stub;
     // Call the stub if lower.
@@ -1380,16 +1394,12 @@
   VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ ReturnStatement");
 
+  CodeForStatementPosition(node);
+  LoadAndSpill(node->expression());
   if (function_return_is_shadowed_) {
-    CodeForStatementPosition(node);
-    LoadAndSpill(node->expression());
     frame_->EmitPop(r0);
     function_return_.Jump();
   } else {
-    // Load the returned value.
-    CodeForStatementPosition(node);
-    LoadAndSpill(node->expression());
-
     // Pop the result from the frame and prepare the frame for
     // returning thus making it easier to merge.
     frame_->EmitPop(r0);
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 70a7b27..b28e965 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -180,6 +180,10 @@
 
   static const int kUnknownIntValue = -1;
 
+  // Number of instructions used for the JS return sequence. The constant is
+  // used by the debugger to patch the JS return sequence.
+  static const int kJSReturnSequenceLength = 4;
+
  private:
   // Construction/Destruction
   CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index e142841..4f45175 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -34,28 +34,41 @@
 namespace internal {
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-// Currently debug break is not supported in frame exit code on ARM.
 bool BreakLocationIterator::IsDebugBreakAtReturn() {
-  return false;
+  return Debug::IsDebugBreakAtReturn(rinfo());
 }
 
 
-// Currently debug break is not supported in frame exit code on ARM.
 void BreakLocationIterator::SetDebugBreakAtReturn() {
-  UNIMPLEMENTED();
+  // Patch the code changing the return from JS function sequence from
+  //   mov sp, fp
+  //   ldmia sp!, {fp, lr}
+  //   add sp, sp, #4
+  //   bx lr
+  // to a call to the debug break return code.
+  //   mov lr, pc
+  //   ldr pc, [pc, #-4]
+  //   <debug break return code entry point address>
+  //   bkpt 0
+  CodePatcher patcher(rinfo()->pc(), 4);
+  patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
+  patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
+  patcher.Emit(Debug::debug_break_return()->entry());
+  patcher.masm()->bkpt(0);
 }
 
 
-// Currently debug break is not supported in frame exit code on ARM.
+// Restore the JS frame exit code.
 void BreakLocationIterator::ClearDebugBreakAtReturn() {
-  UNIMPLEMENTED();
+  rinfo()->PatchCode(original_rinfo()->pc(),
+                     CodeGenerator::kJSReturnSequenceLength);
 }
 
 
+// A debug break in the exit code is identified by a call.
 bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
   ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
-  // Currently debug break is not supported in frame exit code on ARM.
-  return false;
+  return rinfo->IsCallInstruction();
 }
 
 
@@ -95,8 +108,6 @@
 
   __ LeaveInternalFrame();
 
-  // Inlined ExitJSFrame ends here.
-
   // Finally restore all registers.
   __ RestoreRegistersFromMemory(kJSCallerSaved);
 
@@ -138,12 +149,20 @@
 
 
 void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
-  // Keyed load IC not implemented on ARM.
+  // ---------- S t a t e --------------
+  //  -- lr     : return address
+  //  -- sp[0]  : key
+  //  -- sp[4]  : receiver
+  Generate_DebugBreakCallHelper(masm, 0);
 }
 
 
 void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
-  // Keyed store IC not implemented on ARM.
+  // ---------- S t a t e --------------
+  //  -- lr     : return address
+  //  -- sp[0]  : key
+  //  -- sp[4]  : receiver
+  Generate_DebugBreakCallHelper(masm, 0);
 }
 
 
@@ -180,7 +199,10 @@
 
 
 void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
-  // Generate nothing as CodeStub CallFunction is not used on ARM.
+  // ----------- S t a t e -------------
+  //  No registers used on entry.
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, 0);
 }
 
 
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index de2db90..8e1eda9 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -133,7 +133,7 @@
   // and the target address of the call would be referenced by the first
   // instruction rather than the second one, which would make it harder to patch
   // (two instructions before the return address, instead of one).
-  ASSERT(kPatchReturnSequenceLength == sizeof(Instr));
+  ASSERT(kCallTargetAddressOffset == kInstrSize);
 }
 
 
@@ -167,7 +167,7 @@
   add(pc, pc, Operand(index,
                       LSL,
                       assembler::arm::Instr::kInstrSizeLog2 - kSmiTagSize));
-  BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * sizeof(Instr));
+  BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize);
   nop();  // Jump table alignment.
   for (int i = 0; i < targets.length(); i++) {
     b(targets[i]);
@@ -1054,7 +1054,7 @@
         Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
         Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
         Bootstrapper::FixupFlagsUseCodeObject::encode(false);
-    Unresolved entry = { pc_offset() - sizeof(Instr), flags, name };
+    Unresolved entry = { pc_offset() - kInstrSize, flags, name };
     unresolved_.Add(entry);
   }
 }
@@ -1072,7 +1072,7 @@
         Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
         Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
         Bootstrapper::FixupFlagsUseCodeObject::encode(true);
-    Unresolved entry = { pc_offset() - sizeof(Instr), flags, name };
+    Unresolved entry = { pc_offset() - kInstrSize, flags, name };
     unresolved_.Add(entry);
   }
 
@@ -1153,4 +1153,38 @@
 }
 
 
+#ifdef ENABLE_DEBUGGER_SUPPORT
+CodePatcher::CodePatcher(byte* address, int instructions)
+    : address_(address),
+      instructions_(instructions),
+      size_(instructions * Assembler::kInstrSize),
+      masm_(address, size_ + Assembler::kGap) {
+  // Create a new macro assembler pointing to the address of the code to patch.
+  // The size is adjusted with kGap in order for the assembler to generate size
+  // bytes of instructions without failing with buffer size constraints.
+  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+CodePatcher::~CodePatcher() {
+  // Indicate that code has changed.
+  CPU::FlushICache(address_, size_);
+
+  // Check that the code was patched as expected.
+  ASSERT(masm_.pc_ == address_ + size_);
+  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+void CodePatcher::Emit(Instr x) {
+  masm()->emit(x);
+}
+
+
+void CodePatcher::Emit(Address addr) {
+  masm()->emit(reinterpret_cast<Instr>(addr));
+}
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
+
 } }  // namespace v8::internal
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index f45cce5..03aa4d0 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -339,6 +339,35 @@
 };
 
 
+#ifdef ENABLE_DEBUGGER_SUPPORT
+// The code patcher is used to patch (typically) small parts of code e.g. for
+// debugging and other types of instrumentation. When using the code patcher
+// the exact number of bytes specified must be emitted. It is not legal to emit
+// relocation information. If any of these constraints are violated it causes
+// an assertion to fail.
+class CodePatcher {
+ public:
+  CodePatcher(byte* address, int instructions);
+  virtual ~CodePatcher();
+
+  // Macro assembler to emit code.
+  MacroAssembler* masm() { return &masm_; }
+
+  // Emit an instruction directly.
+  void Emit(Instr x);
+
+  // Emit an address directly.
+  void Emit(Address addr);
+
+ private:
+  byte* address_;  // The address of the code being patched.
+  int instructions_;  // Number of instructions of the expected patch size.
+  int size_;  // Number of bytes of the expected patch size.
+  MacroAssembler masm_;  // Macro assembler used to generate the code.
+};
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
+
 // -----------------------------------------------------------------------------
 // Static helper functions.
 
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
index 5b5c870..2d5b140 100644
--- a/src/arm/virtual-frame-arm.cc
+++ b/src/arm/virtual-frame-arm.cc
@@ -127,6 +127,10 @@
 
 void VirtualFrame::Exit() {
   Comment cmnt(masm(), "[ Exit JS frame");
+  // Record the location of the JS exit code so that it can be patched when
+  // setting a break point.
+  __ RecordJSReturn();
+
   // Drop the execution stack down to the frame pointer and restore the caller
   // frame pointer and return address.
   __ mov(sp, fp);
@@ -149,10 +153,10 @@
     __ push(ip);
   }
   if (FLAG_check_stack) {
-    // Put the lr setup instruction in the delay slot.  The 'sizeof(Instr)' is
-    // added to the implicit 8 byte offset that always applies to operations
-    // with pc and gives a return address 12 bytes down.
-    masm()->add(lr, pc, Operand(sizeof(Instr)));
+    // Put the lr setup instruction in the delay slot.  The kInstrSize is added
+    // to the implicit 8 byte offset that always applies to operations with pc
+    // and gives a return address 12 bytes down.
+    masm()->add(lr, pc, Operand(Assembler::kInstrSize));
     masm()->cmp(sp, Operand(r2));
     StackCheckStub stub;
     // Call the stub if lower.
diff --git a/src/assembler.cc b/src/assembler.cc
index 3563ebd..d81b4b0 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -494,7 +494,7 @@
       Address addr = target_address();
       ASSERT(addr != NULL);
       // Check that we can find the right code object.
-      HeapObject* code = HeapObject::FromAddress(addr - Code::kHeaderSize);
+      Code* code = Code::GetCodeFromTargetAddress(addr);
       Object* found = Heap::FindCodeObject(addr);
       ASSERT(found->IsCode());
       ASSERT(code->address() == HeapObject::cast(found)->address());
diff --git a/src/debug.cc b/src/debug.cc
index f3e11ae..3c2bfa8 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -1604,7 +1604,7 @@
   // Find the call address in the running code. This address holds the call to
   // either a DebugBreakXXX or to the debug break return entry code if the
   // break point is still active after processing the break point.
-  Address addr = frame->pc() - Assembler::kPatchReturnSequenceLength;
+  Address addr = frame->pc() - Assembler::kCallTargetAddressOffset;
 
   // Check if the location is at JS exit.
   bool at_js_return = false;
diff --git a/src/factory.cc b/src/factory.cc
index bb6987b..d91b266 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -401,10 +401,12 @@
                                  const char* type,
                                  Handle<JSArray> args) {
   Handle<String> make_str = Factory::LookupAsciiSymbol(maker);
-  Handle<JSFunction> fun =
-      Handle<JSFunction>(
-          JSFunction::cast(
-              Top::builtins()->GetProperty(*make_str)));
+  Handle<Object> fun_obj(Top::builtins()->GetProperty(*make_str));
+  // If the builtins haven't been properly configured yet this error
+  // constructor may not have been defined.  Bail out.
+  if (!fun_obj->IsJSFunction())
+    return Factory::undefined_value();
+  Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
   Handle<Object> type_obj = Factory::LookupAsciiSymbol(type);
   Object** argv[2] = { type_obj.location(),
                        Handle<Object>::cast(args).location() };
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 02bde2a..b8dda17 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -157,6 +157,9 @@
   for (int i = 0; i < instruction_count; i++) {
     *(pc_ + i) = *(instructions + i);
   }
+
+  // Indicate that code has changed.
+  CPU::FlushICache(pc_, instruction_count);
 }
 
 
@@ -164,12 +167,25 @@
 // Additional guard int3 instructions can be added if required.
 void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
   // Call instruction takes up 5 bytes and int3 takes up one byte.
-  int code_size = 5 + guard_bytes;
+  static const int kCallCodeSize = 5;
+  int code_size = kCallCodeSize + guard_bytes;
+
+  // Create a code patcher.
+  CodePatcher patcher(pc_, code_size);
+
+  // Add a label for checking the size of the emitted call code.
+#ifdef DEBUG
+  Label check_codesize;
+  patcher.masm()->bind(&check_codesize);
+#endif
 
   // Patch the code.
-  CodePatcher patcher(pc_, code_size);
   patcher.masm()->call(target, RelocInfo::NONE);
 
+  // Check that the size of the code generated is as expected.
+  ASSERT_EQ(kCallCodeSize,
+            patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
+
   // Add the requested number of int3 instructions after the call.
   for (int i = 0; i < guard_bytes; i++) {
     patcher.masm()->int3();
@@ -721,10 +737,10 @@
   ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CMOV));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  UNIMPLEMENTED();
-  USE(cc);
-  USE(dst);
-  USE(src);
+  // Opcode: 0f 40 + cc /r
+  EMIT(0x0F);
+  EMIT(0x40 + cc);
+  emit_operand(dst, src);
 }
 
 
@@ -866,6 +882,13 @@
 }
 
 
+void Assembler::cmp(const Operand& op, Handle<Object> handle) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_arith(7, op, Immediate(handle));
+}
+
+
 void Assembler::cmpb_al(const Operand& op) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1947,6 +1970,17 @@
 }
 
 
+void Assembler::comisd(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x2F);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::movdbl(XMMRegister dst, const Operand& src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 6a90e07..610017b 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -437,7 +437,7 @@
 
   // Distance between the address of the code target in the call instruction
   // and the return address
-  static const int kPatchReturnSequenceLength = kPointerSize;
+  static const int kCallTargetAddressOffset = kPointerSize;
   // Distance between start of patched return sequence and the emitted address
   // to jump to.
   static const int kPatchReturnSequenceAddressOffset = 1;  // JMP imm32.
@@ -539,6 +539,7 @@
   void cmp(Register reg, Handle<Object> handle);
   void cmp(Register reg, const Operand& op);
   void cmp(const Operand& op, const Immediate& imm);
+  void cmp(const Operand& op, Handle<Object> handle);
 
   void dec_b(Register dst);
 
@@ -719,6 +720,8 @@
   void mulsd(XMMRegister dst, XMMRegister src);
   void divsd(XMMRegister dst, XMMRegister src);
 
+  void comisd(XMMRegister dst, XMMRegister src);
+
   // Use either movsd or movlpd.
   void movdbl(XMMRegister dst, const Operand& src);
   void movdbl(const Operand& dst, XMMRegister src);
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 400a360..d9f6672 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -768,6 +768,11 @@
   static void CheckFloatOperands(MacroAssembler* masm,
                                  Label* non_float,
                                  Register scratch);
+  // Test if operands are numbers (smi or HeapNumber objects), and load
+  // them into xmm0 and xmm1 if they are.  Jump to label not_numbers if
+  // either operand is not a number.  Operands are in edx and eax.
+  // Leaves operands unchanged.
+  static void LoadSse2Operands(MacroAssembler* masm, Label* not_numbers);
   // Allocate a heap number in new space with undefined value.
   // Returns tagged pointer in eax, or jumps to need_gc if new space is full.
   static void AllocateHeapNumber(MacroAssembler* masm,
@@ -6699,41 +6704,79 @@
     case Token::DIV: {
       // eax: y
       // edx: x
-      FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
-      // Fast-case: Both operands are numbers.
-      // Allocate a heap number, if needed.
-      Label skip_allocation;
-      switch (mode_) {
-        case OVERWRITE_LEFT:
-          __ mov(eax, Operand(edx));
-          // Fall through!
-        case OVERWRITE_RIGHT:
-          // If the argument in eax is already an object, we skip the
-          // allocation of a heap number.
-          __ test(eax, Immediate(kSmiTagMask));
-          __ j(not_zero, &skip_allocation, not_taken);
-          // Fall through!
-        case NO_OVERWRITE:
-          FloatingPointHelper::AllocateHeapNumber(masm,
-                                                  &call_runtime,
-                                                  ecx,
-                                                  edx,
-                                                  eax);
-          __ bind(&skip_allocation);
-          break;
-        default: UNREACHABLE();
-      }
-      FloatingPointHelper::LoadFloatOperands(masm, ecx);
 
-      switch (op_) {
-        case Token::ADD: __ faddp(1); break;
-        case Token::SUB: __ fsubp(1); break;
-        case Token::MUL: __ fmulp(1); break;
-        case Token::DIV: __ fdivp(1); break;
-        default: UNREACHABLE();
+      if (CpuFeatures::IsSupported(CpuFeatures::SSE2)) {
+        CpuFeatures::Scope use_sse2(CpuFeatures::SSE2);
+        FloatingPointHelper::LoadSse2Operands(masm, &call_runtime);
+
+        switch (op_) {
+          case Token::ADD: __ addsd(xmm0, xmm1); break;
+          case Token::SUB: __ subsd(xmm0, xmm1); break;
+          case Token::MUL: __ mulsd(xmm0, xmm1); break;
+          case Token::DIV: __ divsd(xmm0, xmm1); break;
+          default: UNREACHABLE();
+        }
+        // Allocate a heap number, if needed.
+        Label skip_allocation;
+        switch (mode_) {
+          case OVERWRITE_LEFT:
+            __ mov(eax, Operand(edx));
+            // Fall through!
+          case OVERWRITE_RIGHT:
+            // If the argument in eax is already an object, we skip the
+            // allocation of a heap number.
+            __ test(eax, Immediate(kSmiTagMask));
+            __ j(not_zero, &skip_allocation, not_taken);
+            // Fall through!
+          case NO_OVERWRITE:
+            FloatingPointHelper::AllocateHeapNumber(masm,
+                                                    &call_runtime,
+                                                    ecx,
+                                                    edx,
+                                                    eax);
+            __ bind(&skip_allocation);
+            break;
+          default: UNREACHABLE();
+        }
+        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        __ ret(2 * kPointerSize);
+
+      } else {  // SSE2 not available, use FPU.
+        FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
+        // Allocate a heap number, if needed.
+        Label skip_allocation;
+        switch (mode_) {
+          case OVERWRITE_LEFT:
+            __ mov(eax, Operand(edx));
+            // Fall through!
+          case OVERWRITE_RIGHT:
+            // If the argument in eax is already an object, we skip the
+            // allocation of a heap number.
+            __ test(eax, Immediate(kSmiTagMask));
+            __ j(not_zero, &skip_allocation, not_taken);
+            // Fall through!
+          case NO_OVERWRITE:
+            FloatingPointHelper::AllocateHeapNumber(masm,
+                                                    &call_runtime,
+                                                    ecx,
+                                                    edx,
+                                                    eax);
+            __ bind(&skip_allocation);
+            break;
+          default: UNREACHABLE();
+        }
+        FloatingPointHelper::LoadFloatOperands(masm, ecx);
+
+        switch (op_) {
+          case Token::ADD: __ faddp(1); break;
+          case Token::SUB: __ fsubp(1); break;
+          case Token::MUL: __ fmulp(1); break;
+          case Token::DIV: __ fdivp(1); break;
+          default: UNREACHABLE();
+        }
+        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        __ ret(2 * kPointerSize);
       }
-      __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-      __ ret(2 * kPointerSize);
     }
     case Token::MOD: {
       // For MOD we go directly to runtime in the non-smi case.
@@ -6981,6 +7024,38 @@
 }
 
 
+void FloatingPointHelper::LoadSse2Operands(MacroAssembler* masm,
+                                           Label* not_numbers) {
+  Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
+  // Load operand in edx into xmm0, or branch to not_numbers.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &load_smi_edx, not_taken);  // Argument in edx is a smi.
+  __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Factory::heap_number_map());
+  __ j(not_equal, not_numbers);  // Argument in edx is not a number.
+  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+  __ bind(&load_eax);
+  // Load operand in eax into xmm1, or branch to not_numbers.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &load_smi_eax, not_taken);  // Argument in eax is a smi.
+  __ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::heap_number_map());
+  __ j(equal, &load_float_eax);
+  __ jmp(not_numbers);  // Argument in eax is not a number.
+  __ bind(&load_smi_edx);
+  __ sar(edx, 1);  // Untag smi before converting to float.
+  __ cvtsi2sd(xmm0, Operand(edx));
+  __ shl(edx, 1);  // Retag smi for heap number overwriting test.
+  __ jmp(&load_eax);
+  __ bind(&load_smi_eax);
+  __ sar(eax, 1);  // Untag smi before converting to float.
+  __ cvtsi2sd(xmm1, Operand(eax));
+  __ shl(eax, 1);  // Retag smi for heap number overwriting test.
+  __ jmp(&done);
+  __ bind(&load_float_eax);
+  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+  __ bind(&done);
+}
+
+
 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
                                             Register scratch) {
   Label load_smi_1, load_smi_2, done_load_1, done;
@@ -7343,28 +7418,56 @@
   // Inlined floating point compare.
   // Call builtin if operands are not floating point or smi.
   Label check_for_symbols;
-  FloatingPointHelper::CheckFloatOperands(masm, &check_for_symbols, ebx);
-  FloatingPointHelper::LoadFloatOperands(masm, ecx);
-  __ FCmp();
+  Label unordered;
+  if (CpuFeatures::IsSupported(CpuFeatures::SSE2)) {
+    CpuFeatures::Scope use_sse2(CpuFeatures::SSE2);
+    CpuFeatures::Scope use_cmov(CpuFeatures::CMOV);
 
-  // Jump to builtin for NaN.
-  __ j(parity_even, &call_builtin, not_taken);
+    FloatingPointHelper::LoadSse2Operands(masm, &check_for_symbols);
+    __ comisd(xmm0, xmm1);
 
-  // TODO(1243847): Use cmov below once CpuFeatures are properly hooked up.
-  Label below_lbl, above_lbl;
-  // use edx, eax to convert unsigned to signed comparison
-  __ j(below, &below_lbl, not_taken);
-  __ j(above, &above_lbl, not_taken);
+    // Jump to builtin for NaN.
+    __ j(parity_even, &unordered, not_taken);
+    __ mov(eax, 0);  // equal
+    __ mov(ecx, Immediate(Smi::FromInt(1)));
+    __ cmov(above, eax, Operand(ecx));
+    __ mov(ecx, Immediate(Smi::FromInt(-1)));
+    __ cmov(below, eax, Operand(ecx));
+    __ ret(2 * kPointerSize);
+  } else {
+    FloatingPointHelper::CheckFloatOperands(masm, &check_for_symbols, ebx);
+    FloatingPointHelper::LoadFloatOperands(masm, ecx);
+    __ FCmp();
 
-  __ xor_(eax, Operand(eax));  // equal
-  __ ret(2 * kPointerSize);
+    // Jump to builtin for NaN.
+    __ j(parity_even, &unordered, not_taken);
 
-  __ bind(&below_lbl);
-  __ mov(eax, -1);
-  __ ret(2 * kPointerSize);
+    Label below_lbl, above_lbl;
+    // Return a result of -1, 0, or 1, to indicate result of comparison.
+    __ j(below, &below_lbl, not_taken);
+    __ j(above, &above_lbl, not_taken);
 
-  __ bind(&above_lbl);
-  __ mov(eax, 1);
+    __ xor_(eax, Operand(eax));  // equal
+    // Both arguments were pushed in case a runtime call was needed.
+    __ ret(2 * kPointerSize);
+
+    __ bind(&below_lbl);
+    __ mov(eax, Immediate(Smi::FromInt(-1)));
+    __ ret(2 * kPointerSize);
+
+    __ bind(&above_lbl);
+    __ mov(eax, Immediate(Smi::FromInt(1)));
+    __ ret(2 * kPointerSize);  // eax, edx were pushed
+  }
+  // If one of the numbers was NaN, then the result is always false.
+  // The cc is never not-equal.
+  __ bind(&unordered);
+  ASSERT(cc_ != not_equal);
+  if (cc_ == less || cc_ == less_equal) {
+    __ mov(eax, Immediate(Smi::FromInt(1)));
+  } else {
+    __ mov(eax, Immediate(Smi::FromInt(-1)));
+  }
   __ ret(2 * kPointerSize);  // eax, edx were pushed
 
   // Fast negative check for symbol-to-symbol equality.
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 9a2753d..c05a5ca 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -840,7 +840,7 @@
 bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
   // The address of the instruction following the call.
   Address test_instruction_address =
-      address + Assembler::kPatchReturnSequenceLength;
+      address + Assembler::kCallTargetAddressOffset;
   // If the instruction following the call is not a test eax, nothing
   // was inlined.
   if (*test_instruction_address != kTestEaxByte) return false;
@@ -867,7 +867,7 @@
 
 static bool PatchInlinedMapCheck(Address address, Object* map) {
   Address test_instruction_address =
-      address + Assembler::kPatchReturnSequenceLength;
+      address + Assembler::kCallTargetAddressOffset;
   // The keyed load has a fast inlined case if the IC call instruction
   // is immediately followed by a test instruction.
   if (*test_instruction_address != kTestEaxByte) return false;
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 79b3089..a8d7e44 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -319,7 +319,7 @@
 
 
 void MacroAssembler::FCmp() {
-  fcompp();
+  fucompp();
   push(eax);
   fnstsw_ax();
   sahf();
@@ -1170,7 +1170,6 @@
 }
 
 
-#ifdef ENABLE_DEBUGGER_SUPPORT
 CodePatcher::CodePatcher(byte* address, int size)
     : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
@@ -1188,7 +1187,6 @@
   ASSERT(masm_.pc_ == address_ + size_);
   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }
-#endif  // ENABLE_DEBUGGER_SUPPORT
 
 
 } }  // namespace v8::internal
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index fa61183..60ede8a 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -338,7 +338,6 @@
 };
 
 
-#ifdef ENABLE_DEBUGGER_SUPPORT
 // The code patcher is used to patch (typically) small parts of code e.g. for
 // debugging and other types of instrumentation. When using the code patcher
 // the exact number of bytes specified must be emitted. It is not legal to emit
@@ -357,7 +356,6 @@
   int size_;  // Number of bytes of the expected patch size.
   MacroAssembler masm_;  // Macro assembler used to generate the code.
 };
-#endif  // ENABLE_DEBUGGER_SUPPORT
 
 
 // -----------------------------------------------------------------------------
diff --git a/src/ic-inl.h b/src/ic-inl.h
index 38d61dc..131f77b 100644
--- a/src/ic-inl.h
+++ b/src/ic-inl.h
@@ -38,7 +38,7 @@
 
 Address IC::address() {
   // Get the address of the call.
-  Address result = pc() - Assembler::kPatchReturnSequenceLength;
+  Address result = pc() - Assembler::kCallTargetAddressOffset;
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // First check if any break points are active if not just return the address
diff --git a/src/ic.cc b/src/ic.cc
index 393ccbf..264b99c 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -122,7 +122,7 @@
   // Get the address of the call site in the active code. This is the
   // place where the call to DebugBreakXXX is and where the IC
   // normally would be.
-  Address addr = pc() - Assembler::kPatchReturnSequenceLength;
+  Address addr = pc() - Assembler::kCallTargetAddressOffset;
   // Return the address in the original code. This is the place where
   // the call which has been overwritten by the DebugBreakXXX resides
   // and the place where the inline cache system should look.
diff --git a/src/ic.h b/src/ic.h
index 007b035..fcf1ec0 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -390,7 +390,7 @@
   // Support for patching the map that is checked in an inlined
   // version of keyed store.
   // The address is the patch point for the IC call
-  // (Assembler::kPatchReturnSequenceLength before the end of
+  // (Assembler::kCallTargetAddressOffset before the end of
   // the call/return address).
   // The map is the new map that the inlined code should check against.
   static bool PatchInlinedStore(Address address, Object* map);
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index e682fe2..8a51541 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -279,7 +279,7 @@
 
   void VisitCodeTarget(RelocInfo* rinfo) {
     ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
-    Code* code = CodeFromDerivedPointer(rinfo->target_address());
+    Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
     if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
       IC::Clear(rinfo->pc());
       // Please note targets for cleared inline cached do not have to be
@@ -289,7 +289,7 @@
     }
     if (IsCompacting()) {
       // When compacting we convert the target to a real object pointer.
-      code = CodeFromDerivedPointer(rinfo->target_address());
+      code = Code::GetCodeFromTargetAddress(rinfo->target_address());
       rinfo->set_target_object(code);
     }
   }
@@ -297,7 +297,7 @@
   void VisitDebugTarget(RelocInfo* rinfo) {
     ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsCallInstruction());
-    HeapObject* code = CodeFromDerivedPointer(rinfo->call_address());
+    HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
     MarkCompactCollector::MarkObject(code);
     // When compacting we convert the call to a real object pointer.
     if (IsCompacting()) rinfo->set_call_object(code);
@@ -314,13 +314,6 @@
   // Tells whether the mark sweep collection will perform compaction.
   bool IsCompacting() { return MarkCompactCollector::IsCompacting(); }
 
-  // Retrieves the Code pointer from derived code entry.
-  Code* CodeFromDerivedPointer(Address addr) {
-    ASSERT(addr != NULL);
-    return reinterpret_cast<Code*>(
-        HeapObject::FromAddress(addr - Code::kHeaderSize));
-  }
-
   // Visit an unmarked object.
   void VisitUnmarkedObject(HeapObject* obj) {
 #ifdef DEBUG
diff --git a/src/objects.cc b/src/objects.cc
index 583af7c..2b6f83f 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -4967,7 +4967,7 @@
        !it.done(); it.next()) {
     Address ic_addr = it.rinfo()->target_address();
     ASSERT(ic_addr != NULL);
-    HeapObject* code = HeapObject::FromAddress(ic_addr - Code::kHeaderSize);
+    HeapObject* code = Code::GetCodeFromTargetAddress(ic_addr);
     ASSERT(code->IsHeapObject());
     it.rinfo()->set_target_object(code);
   }
@@ -4980,7 +4980,7 @@
       if (it.rinfo()->IsCallInstruction()) {
         Address addr = it.rinfo()->call_address();
         ASSERT(addr != NULL);
-        HeapObject* code = HeapObject::FromAddress(addr - Code::kHeaderSize);
+        HeapObject* code = Code::GetCodeFromTargetAddress(addr);
         ASSERT(code->IsHeapObject());
         it.rinfo()->set_call_object(code);
       }
diff --git a/src/objects.h b/src/objects.h
index d9edce7..feeddcb 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -889,11 +889,11 @@
 
 
 // Smi represents integer Numbers that can be stored in 31 bits.
-// TODO(X64) Increase to 53 bits?
 // Smis are immediate which means they are NOT allocated in the heap.
-// The this pointer has the following format: [31 bit signed int] 0
-// TODO(X64): 31 bits signed int sign-extended to 63 bits.
 // Smi stands for small integer.
+// The this pointer has the following format: [31 bit signed int] 0
+// On 64-bit, the top 32 bits of the pointer are allowed to have any
+// value.
 class Smi: public Object {
  public:
   // Returns the integer value.
diff --git a/src/runtime.cc b/src/runtime.cc
index 95776e5..6272827 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -3696,7 +3696,7 @@
   CONVERT_DOUBLE_CHECKED(x, args[0]);
   CONVERT_DOUBLE_CHECKED(y, args[1]);
 
-#ifdef WIN32
+#if defined WIN32 || defined _WIN64
   // Workaround MS fmod bugs. ECMA-262 says:
   // dividend is finite and divisor is an infinity => result equals dividend
   // dividend is a zero and divisor is nonzero finite => result equals dividend
diff --git a/src/spaces.h b/src/spaces.h
index babdd3f..7170318 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -1686,7 +1686,7 @@
 #endif
 
  public:
-  TRACK_MEMORY("MapSpace")
+  TRACK_MEMORY("CellSpace")
 };
 
 
diff --git a/src/third_party/dtoa/dtoa.c b/src/third_party/dtoa/dtoa.c
index fadc6d1..8917d9d 100644
--- a/src/third_party/dtoa/dtoa.c
+++ b/src/third_party/dtoa/dtoa.c
@@ -501,7 +501,9 @@
 #endif
 
 	ACQUIRE_DTOA_LOCK(0);
-	if ((rv = freelist[k])) {
+        /* The k > Kmax case does not need ACQUIRE_DTOA_LOCK(0), */
+        /* but that case seems very unlikely. */
+	if (k <= Kmax && (rv = freelist[k])) {
 		freelist[k] = rv->next;
 		}
 	else {
@@ -511,7 +513,7 @@
 #else
 		len = (sizeof(Bigint) + (x-1)*sizeof(ULong) + sizeof(double) - 1)
 			/sizeof(double);
-		if (pmem_next - private_mem + len <= PRIVATE_mem) {
+		if (k <= Kmax && pmem_next - private_mem + len <= PRIVATE_mem) {
 			rv = (Bigint*)pmem_next;
 			pmem_next += len;
 			}
@@ -535,10 +537,14 @@
 #endif
 {
 	if (v) {
-		ACQUIRE_DTOA_LOCK(0);
-		v->next = freelist[v->k];
-		freelist[v->k] = v;
-		FREE_DTOA_LOCK(0);
+                if (v->k > Kmax)
+                        free((void*)v);
+                else {
+         		ACQUIRE_DTOA_LOCK(0);
+         		v->next = freelist[v->k];
+        		freelist[v->k] = v;
+        		FREE_DTOA_LOCK(0);
+                        }
 		}
 	}
 
diff --git a/src/top.cc b/src/top.cc
index 5c22bcf..039c292 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -690,12 +690,17 @@
 void Top::ReportUncaughtException(Handle<Object> exception,
                                   MessageLocation* location,
                                   Handle<String> stack_trace) {
-  Handle<Object> message =
-    MessageHandler::MakeMessageObject("uncaught_exception",
-                                      location,
-                                      HandleVector<Object>(&exception, 1),
-                                      stack_trace);
-
+  Handle<Object> message;
+  if (!Bootstrapper::IsActive()) {
+    // It's not safe to try to make message objects while the bootstrapper
+    // is active since the infrastructure may not have been properly
+    // initialized.
+    message =
+      MessageHandler::MakeMessageObject("uncaught_exception",
+                                        location,
+                                        HandleVector<Object>(&exception, 1),
+                                        stack_trace);
+  }
   // Report the uncaught exception.
   MessageHandler::ReportMessage(location, message);
 }
@@ -769,10 +774,15 @@
       ComputeLocation(&potential_computed_location);
       location = &potential_computed_location;
     }
-    Handle<String> stack_trace;
-    if (FLAG_trace_exception) stack_trace = StackTrace();
-    message_obj = MessageHandler::MakeMessageObject("uncaught_exception",
-        location, HandleVector<Object>(&exception_handle, 1), stack_trace);
+    if (!Bootstrapper::IsActive()) {
+      // It's not safe to try to make message objects or collect stack
+      // traces while the bootstrapper is active since the infrastructure
+      // may not have been properly initialized.
+      Handle<String> stack_trace;
+      if (FLAG_trace_exception) stack_trace = StackTrace();
+      message_obj = MessageHandler::MakeMessageObject("uncaught_exception",
+          location, HandleVector<Object>(&exception_handle, 1), stack_trace);
+    }
   }
 
   // Save the message for reporting if the exception remains uncaught.
diff --git a/src/v8.h b/src/v8.h
index 50be6df..7786d66 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -51,11 +51,6 @@
 #error both DEBUG and NDEBUG are set
 #endif
 
-// Enable debugger support by default, unless it is in ANDROID
-#if !defined(ENABLE_DEBUGGER_SUPPORT) && !defined(ANDROID)
-#define ENABLE_DEBUGGER_SUPPORT
-#endif
-
 // Basic includes
 #include "../include/v8.h"
 #include "globals.h"
diff --git a/src/version.cc b/src/version.cc
index df5058a..df6b62f 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     1
 #define MINOR_VERSION     3
-#define BUILD_NUMBER      10
+#define BUILD_NUMBER      11
 #define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
 
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index af98ef9..b4204a9 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -173,22 +173,32 @@
 // Patch the code at the current PC with a call to the target address.
 // Additional guard int3 instructions can be added if required.
 void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
-  // Call instruction takes up 13 bytes and int3 takes up one byte.
-  static const int kCallInstructionSize = 13;
-  Address patch_site = pc_;
-  Memory::uint16_at(patch_site) = 0xBA49u;  // movq r10, imm64
-  // Write "0x00, call r10" starting at last byte of address.  We overwrite
-  // the 0x00 later, and this lets us write a uint32.
-  Memory::uint32_at(patch_site + 9) = 0xD2FF4900u;  // 0x00, call r10
-  Memory::Address_at(patch_site + 2) = target;
+  // Loading a register with a 64-bit immediate and calling through that
+  // register takes up 13 bytes, and int3 takes up one byte.
+  static const int kCallCodeSize = 13;
+  int code_size = kCallCodeSize + guard_bytes;
+
+  // Create a code patcher.
+  CodePatcher patcher(pc_, code_size);
+
+  // Add a label for checking the size of the emitted call code.
+#ifdef DEBUG
+  Label check_codesize;
+  patcher.masm()->bind(&check_codesize);
+#endif
+
+  // Patch the code.
+  patcher.masm()->movq(r10, target, RelocInfo::NONE);
+  patcher.masm()->call(r10);
+
+  // Check that the size of the code generated is as expected.
+  ASSERT_EQ(kCallCodeSize,
+            patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
 
   // Add the requested number of int3 instructions after the call.
   for (int i = 0; i < guard_bytes; i++) {
-    *(patch_site + kCallInstructionSize + i) = 0xCC;  // int3
+    patcher.masm()->int3();
   }
-
-  // Indicate that code has changed.
-  CPU::FlushICache(patch_site, kCallInstructionSize + guard_bytes);
 }
 
 
@@ -197,6 +207,9 @@
   for (int i = 0; i < instruction_count; i++) {
     *(pc_ + i) = *(instructions + i);
   }
+
+  // Indicate that code has changed.
+  CPU::FlushICache(pc_, instruction_count);
 }
 
 // -----------------------------------------------------------------------------
@@ -366,7 +379,7 @@
 
 
 void Assembler::GrowBuffer() {
-  ASSERT(overflow());  // should not call this otherwise
+  ASSERT(buffer_overflow());  // should not call this otherwise
   if (!own_buffer_) FATAL("external code buffer is too small");
 
   // compute new buffer size
@@ -428,7 +441,7 @@
     }
   }
 
-  ASSERT(!overflow());
+  ASSERT(!buffer_overflow());
 }
 
 
@@ -1410,6 +1423,15 @@
 }
 
 
+void Assembler::negl(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  emit(0xF7);
+  emit_modrm(0x3, dst);
+}
+
+
 void Assembler::neg(const Operand& dst) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 4d341c6..697dd54 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -447,7 +447,7 @@
 
   // Distance between the address of the code target in the call instruction
   // and the return address.  Checked in the debug build.
-  static const int kPatchReturnSequenceLength = 3 + kPointerSize;
+  static const int kCallTargetAddressOffset = 3 + kPointerSize;
   // Distance between start of patched return sequence and the emitted address
   // to jump to (movq = REX.W 0xB8+r.).
   static const int kPatchReturnSequenceAddressOffset = 2;
@@ -721,6 +721,7 @@
 
   void neg(Register dst);
   void neg(const Operand& dst);
+  void negl(Register dst);
 
   void not_(Register dst);
   void not_(const Operand& dst);
@@ -729,6 +730,10 @@
     arithmetic_op(0x0B, dst, src);
   }
 
+  void orl(Register dst, Register src) {
+    arithmetic_op_32(0x0B, dst, src);
+  }
+
   void or_(Register dst, const Operand& src) {
     arithmetic_op(0x0B, dst, src);
   }
@@ -860,6 +865,10 @@
     arithmetic_op(0x33, dst, src);
   }
 
+  void xorl(Register dst, Register src) {
+    arithmetic_op_32(0x33, dst, src);
+  }
+
   void xor_(Register dst, const Operand& src) {
     arithmetic_op(0x33, dst, src);
   }
@@ -1049,7 +1058,9 @@
   // Check if there is less than kGap bytes available in the buffer.
   // If this is the case, we need to grow the buffer before emitting
   // an instruction or relocation information.
-  inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
+  inline bool buffer_overflow() const {
+    return pc_ >= reloc_info_writer.pos() - kGap;
+  }
 
   // Get the number of bytes available in the buffer.
   inline int available_space() const { return reloc_info_writer.pos() - pc_; }
@@ -1279,7 +1290,7 @@
 class EnsureSpace BASE_EMBEDDED {
  public:
   explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
-    if (assembler_->overflow()) assembler_->GrowBuffer();
+    if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
 #ifdef DEBUG
     space_before_ = assembler_->available_space();
 #endif
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 1fea61e..170a15b 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -61,8 +61,7 @@
   // Preserve the number of arguments on the stack. Must preserve both
   // rax and rbx because these registers are used when copying the
   // arguments and the receiver.
-  ASSERT(kSmiTagSize == 1);
-  __ lea(rcx, Operand(rax, rax, times_1, kSmiTag));
+  __ Integer32ToSmi(rcx, rax);
   __ push(rcx);
 }
 
@@ -77,10 +76,13 @@
 
   // Remove caller arguments from the stack.
   // rbx holds a Smi, so we convert to a dword offset by multiplying by 4.
+  // TODO(smi): Find a way to abstract indexing by a smi.
   ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
   ASSERT_EQ(kPointerSize, (1 << kSmiTagSize) * 4);
   __ pop(rcx);
-  __ lea(rsp, Operand(rsp, rbx, times_4, 1 * kPointerSize));  // 1 ~ receiver
+  // 1 * kPointerSize is offset of receiver.
+  __ lea(rsp, Operand(rsp, rbx, times_half_pointer_size, 1 * kPointerSize));
   __ push(rcx);
 }
 
@@ -192,8 +194,7 @@
   { Label done, non_function, function;
     // The function to call is at position n+1 on the stack.
     __ movq(rdi, Operand(rsp, rax, times_pointer_size, +1 * kPointerSize));
-    __ testl(rdi, Immediate(kSmiTagMask));
-    __ j(zero, &non_function);
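+    // JumpIfSmi branches when rdi holds a smi, i.e. when the callee is not a
+    // heap object.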
+    __ JumpIfSmi(rdi, &non_function);
     __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
     __ j(equal, &function);
 
@@ -213,8 +214,7 @@
   { Label call_to_object, use_global_receiver, patch_receiver, done;
     __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
 
-    __ testl(rbx, Immediate(kSmiTagMask));
-    __ j(zero, &call_to_object);
+    __ JumpIfSmi(rbx, &call_to_object);
 
     __ CompareRoot(rbx, Heap::kNullValueRootIndex);
     __ j(equal, &use_global_receiver);
@@ -230,8 +230,7 @@
     __ EnterInternalFrame();  // preserves rax, rbx, rdi
 
     // Store the arguments count on the stack (smi tagged).
-    ASSERT(kSmiTag == 0);
-    __ shl(rax, Immediate(kSmiTagSize));
+    __ Integer32ToSmi(rax, rax);
     __ push(rax);
 
     __ push(rdi);  // save rdi across the call
@@ -242,7 +241,7 @@
 
     // Get the arguments count and untag it.
     __ pop(rax);
-    __ shr(rax, Immediate(kSmiTagSize));
+    __ SmiToInteger32(rax, rax);
 
     __ LeaveInternalFrame();
     __ jmp(&patch_receiver);
@@ -355,8 +354,7 @@
     Label okay;
     // Make rdx the space we need for the array when it is unrolled onto the
     // stack.
-    __ movq(rdx, rax);
-    __ shl(rdx, Immediate(kPointerSizeLog2 - kSmiTagSize));
+    __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
     __ cmpq(rcx, rdx);
     __ j(greater, &okay);
 
@@ -382,8 +380,7 @@
   // Compute the receiver.
   Label call_to_object, use_global_receiver, push_receiver;
   __ movq(rbx, Operand(rbp, kReceiverOffset));
-  __ testl(rbx, Immediate(kSmiTagMask));
-  __ j(zero, &call_to_object);
+  __ JumpIfSmi(rbx, &call_to_object);
   __ CompareRoot(rbx, Heap::kNullValueRootIndex);
   __ j(equal, &use_global_receiver);
   __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
@@ -446,7 +443,7 @@
 
   // Invoke the function.
   ParameterCount actual(rax);
-  __ shr(rax, Immediate(kSmiTagSize));
+  __ SmiToInteger32(rax, rax);
   __ movq(rdi, Operand(rbp, kFunctionOffset));
   __ InvokeFunction(rdi, actual, CALL_FUNCTION);
 
@@ -463,8 +460,7 @@
 
   Label non_function_call;
   // Check that function is not a smi.
-  __ testl(rdi, Immediate(kSmiTagMask));
-  __ j(zero, &non_function_call);
+  __ JumpIfSmi(rdi, &non_function_call);
   // Check that function is a JSFunction.
   __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
   __ j(not_equal, &non_function_call);
@@ -492,7 +488,7 @@
   __ EnterConstructFrame();
 
   // Store a smi-tagged arguments count on the stack.
-  __ shl(rax, Immediate(kSmiTagSize));
+  __ Integer32ToSmi(rax, rax);
   __ push(rax);
 
   // Push the function to invoke on the stack.
@@ -517,8 +513,7 @@
     // rdi: constructor
     __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
     // The smi check below catches both a NULL value and a Smi.
-    __ testl(rax, Immediate(kSmiTagMask));
-    __ j(zero, &rt_call);
+    __ JumpIfSmi(rax, &rt_call);
     // rdi: constructor
     // rax: initial map (if proven valid below)
     __ CmpObjectType(rax, MAP_TYPE, rbx);
@@ -668,7 +663,7 @@
 
   // Retrieve smi-tagged arguments count from the stack.
   __ movq(rax, Operand(rsp, 0));
-  __ shr(rax, Immediate(kSmiTagSize));
+  __ SmiToInteger32(rax, rax);
 
   // Push the allocated receiver to the stack. We need two copies
   // because we may have to return the original one and the calling
@@ -701,8 +696,7 @@
   // on page 74.
   Label use_receiver, exit;
   // If the result is a smi, it is *not* an object in the ECMA sense.
-  __ testl(rax, Immediate(kSmiTagMask));
-  __ j(zero, &use_receiver);
+  __ JumpIfSmi(rax, &use_receiver);
 
   // If the type of the result (stored in its map) is less than
   // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
@@ -721,8 +715,10 @@
 
   // Remove caller arguments from the stack and return.
   ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  // TODO(smi): Find a way to abstract indexing by a smi.
   __ pop(rcx);
-  __ lea(rsp, Operand(rsp, rbx, times_4, 1 * kPointerSize));  // 1 ~ receiver
+  // 1 * kPointerSize is offset of receiver.
+  __ lea(rsp, Operand(rsp, rbx, times_half_pointer_size, 1 * kPointerSize));
   __ push(rcx);
   __ IncrementCounter(&Counters::constructed_objects, 1);
   __ ret(0);
diff --git a/src/x64/cfg-x64.cc b/src/x64/cfg-x64.cc
index 0b71d8e..b755f49 100644
--- a/src/x64/cfg-x64.cc
+++ b/src/x64/cfg-x64.cc
@@ -112,12 +112,14 @@
   __ pop(rbp);
   int count = CfgGlobals::current()->fun()->scope()->num_parameters();
   __ ret((count + 1) * kPointerSize);
+#ifdef ENABLE_DEBUGGER_SUPPORT
   // Add padding that will be overwritten by a debugger breakpoint.
   // "movq rsp, rbp; pop rbp" has length 4.  "ret k" has length 3.
   const int kPadding = Debug::kX64JSReturnSequenceLength - 4 - 3;
   for (int i = 0; i < kPadding; ++i) {
     __ int3();
   }
+#endif
 }
 
 
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 31f55ae..a8c8c40 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -509,6 +509,7 @@
   // receiver.
   frame_->Exit();
   masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
+#ifdef ENABLE_DEBUGGER_SUPPORT
   // Add padding that will be overwritten by a debugger breakpoint.
   // frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k"
   // with length 7 (3 + 1 + 3).
@@ -516,12 +517,12 @@
   for (int i = 0; i < kPadding; ++i) {
     masm_->int3();
   }
-  DeleteFrame();
-
   // Check that the size of the code used for returning matches what is
   // expected by the debugger.
   ASSERT_EQ(Debug::kX64JSReturnSequenceLength,
             masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+#endif
+  DeleteFrame();
 }
 
 
@@ -720,11 +721,12 @@
     frame_->SyncRange(0, frame_->element_count() - 1);
 
     // Check that the receiver really is a JavaScript object.
-    { frame_->PushElementAt(0);
+    {
+      frame_->PushElementAt(0);
       Result receiver = frame_->Pop();
       receiver.ToRegister();
-      __ testl(receiver.reg(), Immediate(kSmiTagMask));
-      build_args.Branch(zero);
+      Condition is_smi = masm_->CheckSmi(receiver.reg());
+      build_args.Branch(is_smi);
       // We allow all JSObjects including JSFunctions.  As long as
       // JS_FUNCTION_TYPE is the last instance type and it is right
       // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
@@ -736,11 +738,12 @@
     }
 
     // Verify that we're invoking Function.prototype.apply.
-    { frame_->PushElementAt(1);
+    {
+      frame_->PushElementAt(1);
       Result apply = frame_->Pop();
       apply.ToRegister();
-      __ testl(apply.reg(), Immediate(kSmiTagMask));
-      build_args.Branch(zero);
+      Condition is_smi = masm_->CheckSmi(apply.reg());
+      build_args.Branch(is_smi);
       Result tmp = allocator_->Allocate();
       __ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
       build_args.Branch(not_equal);
@@ -755,8 +758,8 @@
     // Get the function receiver from the stack. Check that it
     // really is a function.
     __ movq(rdi, Operand(rsp, 2 * kPointerSize));
-    __ testl(rdi, Immediate(kSmiTagMask));
-    build_args.Branch(zero);
+    Condition is_smi = masm_->CheckSmi(rdi);
+    build_args.Branch(is_smi);
     __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
     build_args.Branch(not_equal);
 
@@ -780,7 +783,7 @@
     __ bind(&adapted);
     static const uint32_t kArgumentsLimit = 1 * KB;
     __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-    __ shrl(rax, Immediate(kSmiTagSize));
+    __ SmiToInteger32(rax, rax);
     __ movq(rcx, rax);
     __ cmpq(rax, Immediate(kArgumentsLimit));
     build_args.Branch(above);
@@ -1657,8 +1660,8 @@
 
   // Check if enumerable is already a JSObject
   // rax: value to be iterated over
-  __ testl(rax, Immediate(kSmiTagMask));
-  primitive.Branch(zero);
+  Condition is_smi = masm_->CheckSmi(rax);
+  primitive.Branch(is_smi);
   __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
   jsobject.Branch(above_equal);
 
@@ -1695,8 +1698,8 @@
 
   frame_->EmitPush(rax);  // <- slot 3
   frame_->EmitPush(rdx);  // <- slot 2
-  __ movsxlq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
-  __ shl(rax, Immediate(kSmiTagSize));
+  __ movl(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
+  __ Integer32ToSmi(rax, rax);
   frame_->EmitPush(rax);  // <- slot 1
   frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 0
   entry.Jump();
@@ -1707,8 +1710,8 @@
   frame_->EmitPush(rax);  // <- slot 2
 
   // Push the length of the array and the initial index onto the stack.
-  __ movsxlq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
-  __ shl(rax, Immediate(kSmiTagSize));
+  __ movl(rax, FieldOperand(rax, FixedArray::kLengthOffset));
+  __ Integer32ToSmi(rax, rax);
   frame_->EmitPush(rax);  // <- slot 1
   frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 0
 
@@ -1725,9 +1728,9 @@
 
   // Get the i'th entry of the array.
   __ movq(rdx, frame_->ElementAt(2));
-  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-  // Multiplier is times_4 since rax is already a Smi.
-  __ movq(rbx, FieldOperand(rdx, rax, times_4, FixedArray::kHeaderSize));
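+  // SmiToIndex yields a register/scale pair equivalent to the untagged smi in
+  // rax scaled by kPointerSize, independent of the smi encoding.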
+  SmiIndex index = masm_->SmiToIndex(rbx, rax, kPointerSizeLog2);
+  __ movq(rbx,
+          FieldOperand(rdx, index.reg, index.scale, FixedArray::kHeaderSize));
 
   // Get the expected map from the stack or a zero map in the
   // permanent slow case rax: current iteration count rbx: i'th entry
@@ -3093,8 +3096,9 @@
         JumpTarget continue_label;
         Result operand = frame_->Pop();
         operand.ToRegister();
-        __ testl(operand.reg(), Immediate(kSmiTagMask));
-        smi_label.Branch(zero, &operand);
+
+        Condition is_smi = masm_->CheckSmi(operand.reg());
+        smi_label.Branch(is_smi, &operand);
 
         frame_->Push(&operand);  // undo popping of TOS
         Result answer = frame_->InvokeBuiltin(Builtins::BIT_NOT,
@@ -3103,9 +3107,7 @@
         smi_label.Bind(&answer);
         answer.ToRegister();
         frame_->Spill(answer.reg());
-        __ not_(answer.reg());
-        // Remove inverted smi-tag.  The mask is sign-extended to 64 bits.
-        __ xor_(answer.reg(), Immediate(kSmiTagMask));
+        __ SmiNot(answer.reg(), answer.reg());
         continue_label.Bind(&answer);
         frame_->Push(&answer);
         break;
@@ -3116,9 +3118,8 @@
         JumpTarget continue_label;
         Result operand = frame_->Pop();
         operand.ToRegister();
-        __ testl(operand.reg(), Immediate(kSmiTagMask));
-        continue_label.Branch(zero, &operand, taken);
-
+        Condition is_smi = masm_->CheckSmi(operand.reg());
+        continue_label.Branch(is_smi, &operand);
         frame_->Push(&operand);
         Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
                                               CALL_FUNCTION, 1);
@@ -3264,8 +3265,7 @@
     }
     // Smi test.
     deferred->Branch(overflow);
-    __ testl(kScratchRegister, Immediate(kSmiTagMask));
-    deferred->Branch(not_zero);
+    __ JumpIfNotSmi(kScratchRegister, deferred->entry_label());
     __ movq(new_value.reg(), kScratchRegister);
     deferred->BindExit();
 
@@ -3470,8 +3470,8 @@
     answer.ToRegister();
 
     if (check->Equals(Heap::number_symbol())) {
-      __ testl(answer.reg(), Immediate(kSmiTagMask));
-      destination()->true_target()->Branch(zero);
+      Condition is_smi = masm_->CheckSmi(answer.reg());
+      destination()->true_target()->Branch(is_smi);
       frame_->Spill(answer.reg());
       __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
       __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex);
@@ -3479,8 +3479,8 @@
       destination()->Split(equal);
 
     } else if (check->Equals(Heap::string_symbol())) {
-      __ testl(answer.reg(), Immediate(kSmiTagMask));
-      destination()->false_target()->Branch(zero);
+      Condition is_smi = masm_->CheckSmi(answer.reg());
+      destination()->false_target()->Branch(is_smi);
 
       // It can be an undetectable string object.
       __ movq(kScratchRegister,
@@ -3503,8 +3503,8 @@
       __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex);
       destination()->true_target()->Branch(equal);
 
-      __ testl(answer.reg(), Immediate(kSmiTagMask));
-      destination()->false_target()->Branch(zero);
+      Condition is_smi = masm_->CheckSmi(answer.reg());
+      destination()->false_target()->Branch(is_smi);
 
       // It can be an undetectable object.
       __ movq(kScratchRegister,
@@ -3515,16 +3515,16 @@
       destination()->Split(not_zero);
 
     } else if (check->Equals(Heap::function_symbol())) {
-      __ testl(answer.reg(), Immediate(kSmiTagMask));
-      destination()->false_target()->Branch(zero);
+      Condition is_smi = masm_->CheckSmi(answer.reg());
+      destination()->false_target()->Branch(is_smi);
       frame_->Spill(answer.reg());
       __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
       answer.Unuse();
       destination()->Split(equal);
 
     } else if (check->Equals(Heap::object_symbol())) {
-      __ testl(answer.reg(), Immediate(kSmiTagMask));
-      destination()->false_target()->Branch(zero);
+      Condition is_smi = masm_->CheckSmi(answer.reg());
+      destination()->false_target()->Branch(is_smi);
       __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
       destination()->true_target()->Branch(equal);
 
@@ -3623,8 +3623,8 @@
   Result value = frame_->Pop();
   value.ToRegister();
   ASSERT(value.is_valid());
-  __ testl(value.reg(), Immediate(kSmiTagMask));
-  destination()->false_target()->Branch(equal);
+  Condition is_smi = masm_->CheckSmi(value.reg());
+  destination()->false_target()->Branch(is_smi);
   // It is a heap object - get map.
   // Check if the object is a JS array or not.
   __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
@@ -3727,17 +3727,13 @@
   // push.
 
   // If the receiver is a smi trigger the slow case.
-  ASSERT(kSmiTag == 0);
-  __ testl(object.reg(), Immediate(kSmiTagMask));
-  __ j(zero, &slow_case);
+  __ JumpIfSmi(object.reg(), &slow_case);
 
   // If the index is negative or non-smi trigger the slow case.
-  ASSERT(kSmiTag == 0);
-  __ testl(index.reg(),
-           Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000U)));
-  __ j(not_zero, &slow_case);
+  __ JumpIfNotPositiveSmi(index.reg(), &slow_case);
+
   // Untag the index.
-  __ sarl(index.reg(), Immediate(kSmiTagSize));
+  __ SmiToInteger32(index.reg(), index.reg());
 
   __ bind(&try_again_with_new_string);
   // Fetch the instance type of the receiver into rcx.
@@ -3790,8 +3786,7 @@
                                       times_1,
                                       SeqAsciiString::kHeaderSize));
   __ bind(&got_char_code);
-  ASSERT(kSmiTag == 0);
-  __ shl(temp.reg(), Immediate(kSmiTagSize));
+  __ Integer32ToSmi(temp.reg(), temp.reg());
   __ jmp(&end);
 
   // Handle non-flat strings.
@@ -3832,10 +3827,9 @@
   Result value = frame_->Pop();
   value.ToRegister();
   ASSERT(value.is_valid());
-  __ testl(value.reg(),
-           Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000U)));
+  Condition positive_smi = masm_->CheckPositiveSmi(value.reg());
   value.Unuse();
-  destination()->Split(zero);
+  destination()->Split(positive_smi);
 }
 
 
@@ -3845,9 +3839,9 @@
   Result value = frame_->Pop();
   value.ToRegister();
   ASSERT(value.is_valid());
-  __ testl(value.reg(), Immediate(kSmiTagMask));
+  Condition is_smi = masm_->CheckSmi(value.reg());
   value.Unuse();
-  destination()->Split(zero);
+  destination()->Split(is_smi);
 }
 
 
@@ -3891,7 +3885,9 @@
 
 void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 0);
-  ASSERT(kSmiTag == 0);  // RBP value is aligned, so it should look like Smi.
+  // The RBP value is aligned, so its low bits are zero and it already looks
+  // like a tagged smi (though it is not necessarily a valid smi value).
+  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
   Result rbp_as_smi = allocator_->Allocate();
   ASSERT(rbp_as_smi.is_valid());
   __ movq(rbp_as_smi.reg(), rbp);
@@ -4002,8 +3998,8 @@
   frame_->Spill(obj.reg());
 
   // If the object is a smi, we return null.
-  __ testl(obj.reg(), Immediate(kSmiTagMask));
-  null.Branch(zero);
+  Condition is_smi = masm_->CheckSmi(obj.reg());
+  null.Branch(is_smi);
 
   // Check that the object is a JS object but take special care of JS
   // functions to make sure they have 'Function' as their class.
@@ -4064,8 +4060,8 @@
   object.ToRegister();
 
   // if (object->IsSmi()) return value.
-  __ testl(object.reg(), Immediate(kSmiTagMask));
-  leave.Branch(zero, &value);
+  Condition is_smi = masm_->CheckSmi(object.reg());
+  leave.Branch(is_smi, &value);
 
   // It is a heap object - get its map.
   Result scratch = allocator_->Allocate();
@@ -4105,8 +4101,8 @@
   object.ToRegister();
   ASSERT(object.is_valid());
   // if (object->IsSmi()) return object.
-  __ testl(object.reg(), Immediate(kSmiTagMask));
-  leave.Branch(zero);
+  Condition is_smi = masm_->CheckSmi(object.reg());
+  leave.Branch(is_smi);
   // It is a heap object - get map.
   Result temp = allocator()->Allocate();
   ASSERT(temp.is_valid());
@@ -4274,11 +4270,10 @@
   dest->false_target()->Branch(equal);
 
   // Smi => false iff zero.
-  ASSERT(kSmiTag == 0);
-  __ testl(value.reg(), value.reg());
-  dest->false_target()->Branch(zero);
-  __ testl(value.reg(), Immediate(kSmiTagMask));
-  dest->true_target()->Branch(zero);
+  Condition equals = masm_->CheckSmiEqualsConstant(value.reg(), 0);
+  dest->false_target()->Branch(equals);
+  Condition is_smi = masm_->CheckSmi(value.reg());
+  dest->true_target()->Branch(is_smi);
 
   // Call the stub for all other cases.
   frame_->Push(&value);  // Undo the Pop() from above.
@@ -4940,8 +4935,9 @@
       JumpTarget is_smi;
       Register left_reg = left_side.reg();
       Handle<Object> right_val = right_side.handle();
-      __ testl(left_side.reg(), Immediate(kSmiTagMask));
-      is_smi.Branch(zero, taken);
+
+      Condition left_is_smi = masm_->CheckSmi(left_side.reg());
+      is_smi.Branch(left_is_smi);
 
       // Setup and call the compare stub.
       CompareStub stub(cc, strict);
@@ -4982,8 +4978,8 @@
       dest->true_target()->Branch(equal);
       __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
       dest->true_target()->Branch(equal);
-      __ testl(operand.reg(), Immediate(kSmiTagMask));
-      dest->false_target()->Branch(equal);
+      Condition is_smi = masm_->CheckSmi(operand.reg());
+      dest->false_target()->Branch(is_smi);
 
       // It can be an undetectable object.
       // Use a scratch register in preference to spilling operand.reg().
@@ -5023,10 +5019,8 @@
       Register left_reg = left_side.reg();
       Register right_reg = right_side.reg();
 
-      __ movq(kScratchRegister, left_reg);
-      __ or_(kScratchRegister, right_reg);
-      __ testl(kScratchRegister, Immediate(kSmiTagMask));
-      is_smi.Branch(zero, taken);
+      Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg);
+      is_smi.Branch(both_smi);
       // When non-smi, call out to the compare stub.
       CompareStub stub(cc, strict);
       Result answer = frame_->CallStub(&stub, &left_side, &right_side);
@@ -5317,15 +5311,11 @@
                                             smi_value,
                                             overwrite_mode);
       }
-      __ testl(operand->reg(), Immediate(kSmiTagMask));
-      deferred->Branch(not_zero);
-      // A smi currently fits in a 32-bit Immediate.
-      __ addl(operand->reg(), Immediate(smi_value));
-      Label add_success;
-      __ j(no_overflow, &add_success);
-      __ subl(operand->reg(), Immediate(smi_value));
-      deferred->Jump();
-      __ bind(&add_success);
+      __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
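+      // The label argument is the overflow bailout: if the sum does not fit in
+      // a smi, control transfers to the deferred code instead.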
+      __ SmiAddConstant(operand->reg(),
+                        operand->reg(),
+                        int_value,
+                        deferred->entry_label());
       deferred->BindExit();
       frame_->Push(operand);
       break;
@@ -5342,15 +5332,12 @@
         DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
                                                           smi_value,
                                                           overwrite_mode);
-        __ testl(operand->reg(), Immediate(kSmiTagMask));
-        deferred->Branch(not_zero);
+        __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
         // A smi currently fits in a 32-bit Immediate.
-        __ subl(operand->reg(), Immediate(smi_value));
-        Label add_success;
-        __ j(no_overflow, &add_success);
-        __ addl(operand->reg(), Immediate(smi_value));
-        deferred->Jump();
-        __ bind(&add_success);
+        __ SmiSubConstant(operand->reg(),
+                          operand->reg(),
+                          int_value,
+                          deferred->entry_label());
         deferred->BindExit();
         frame_->Push(operand);
       }
@@ -5374,12 +5361,10 @@
                                            operand->reg(),
                                            smi_value,
                                            overwrite_mode);
-        __ testl(operand->reg(), Immediate(kSmiTagMask));
-        deferred->Branch(not_zero);
-        if (shift_value > 0) {
-          __ sarl(operand->reg(), Immediate(shift_value));
-          __ and_(operand->reg(), Immediate(~kSmiTagMask));
-        }
+        __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
+        __ SmiShiftArithmeticRightConstant(operand->reg(),
+                                           operand->reg(),
+                                           shift_value);
         deferred->BindExit();
         frame_->Push(operand);
       }
@@ -5403,21 +5388,13 @@
                                            operand->reg(),
                                            smi_value,
                                            overwrite_mode);
-        __ testl(operand->reg(), Immediate(kSmiTagMask));
-        deferred->Branch(not_zero);
-        __ movl(answer.reg(), operand->reg());
-        __ sarl(answer.reg(), Immediate(kSmiTagSize));
-        __ shrl(answer.reg(), Immediate(shift_value));
-        // A negative Smi shifted right two is in the positive Smi range.
-        if (shift_value < 2) {
-          __ testl(answer.reg(), Immediate(0xc0000000));
-          deferred->Branch(not_zero);
-        }
-        operand->Unuse();
-        ASSERT(kSmiTag == 0);
-        ASSERT(kSmiTagSize == 1);
-        __ addl(answer.reg(), answer.reg());
+        __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
+        __ SmiShiftLogicalRightConstant(answer.reg(),
+                                        operand->reg(),
+                                        shift_value,
+                                        deferred->entry_label());
         deferred->BindExit();
+        operand->Unuse();
         frame_->Push(&answer);
       }
       break;
@@ -5441,8 +5418,7 @@
                                              operand->reg(),
                                              smi_value,
                                              overwrite_mode);
-          __ testl(operand->reg(), Immediate(kSmiTagMask));
-          deferred->Branch(not_zero);
+          __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
           deferred->BindExit();
           frame_->Push(operand);
         } else {
@@ -5455,18 +5431,11 @@
                                              operand->reg(),
                                              smi_value,
                                              overwrite_mode);
-          __ testl(operand->reg(), Immediate(kSmiTagMask));
-          deferred->Branch(not_zero);
-          __ movl(answer.reg(), operand->reg());
-          ASSERT(kSmiTag == 0);  // adjust code if not the case
-          // We do no shifts, only the Smi conversion, if shift_value is 1.
-          if (shift_value > 1) {
-            __ shll(answer.reg(), Immediate(shift_value - 1));
-          }
-          // Convert int result to Smi, checking that it is in int range.
-          ASSERT(kSmiTagSize == 1);  // adjust code if not the case
-          __ addl(answer.reg(), answer.reg());
-          deferred->Branch(overflow);
+          __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
+          __ SmiShiftLeftConstant(answer.reg(),
+                                  operand->reg(),
+                                  shift_value,
+                                  deferred->entry_label());
           deferred->BindExit();
           operand->Unuse();
           frame_->Push(&answer);
@@ -5490,18 +5459,17 @@
                                                                operand->reg(),
                                                                smi_value,
                                                                overwrite_mode);
-      __ testl(operand->reg(), Immediate(kSmiTagMask));
-      deferred->Branch(not_zero);
+      __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
       if (op == Token::BIT_AND) {
-        __ and_(operand->reg(), Immediate(smi_value));
+        __ SmiAndConstant(operand->reg(), operand->reg(), int_value);
       } else if (op == Token::BIT_XOR) {
         if (int_value != 0) {
-          __ xor_(operand->reg(), Immediate(smi_value));
+          __ SmiXorConstant(operand->reg(), operand->reg(), int_value);
         }
       } else {
         ASSERT(op == Token::BIT_OR);
         if (int_value != 0) {
-          __ or_(operand->reg(), Immediate(smi_value));
+          __ SmiOrConstant(operand->reg(), operand->reg(), int_value);
         }
       }
       deferred->BindExit();
@@ -5522,14 +5490,12 @@
                                                                 smi_value,
                                                                 overwrite_mode);
         // Check for negative or non-Smi left hand side.
-        __ testl(operand->reg(),
-                 Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000)));
-        deferred->Branch(not_zero);
+        __ JumpIfNotPositiveSmi(operand->reg(), deferred->entry_label());
         if (int_value < 0) int_value = -int_value;
         if (int_value == 1) {
           __ movl(operand->reg(), Immediate(Smi::FromInt(0)));
         } else {
-          __ and_(operand->reg(), Immediate((int_value << kSmiTagSize) - 1));
+          __ SmiAndConstant(operand->reg(), operand->reg(), int_value - 1);
         }
         deferred->BindExit();
         frame_->Push(operand);
@@ -5631,67 +5597,17 @@
                                           left->reg(),
                                           right->reg(),
                                           overwrite_mode);
-    if (left->reg().is(right->reg())) {
-      __ testl(left->reg(), Immediate(kSmiTagMask));
-    } else {
-      // Use the quotient register as a scratch for the tag check.
-      if (!left_is_in_rax) __ movq(rax, left->reg());
-      left_is_in_rax = false;  // About to destroy the value in rax.
-      __ or_(rax, right->reg());
-      ASSERT(kSmiTag == 0);  // Adjust test if not the case.
-      __ testl(rax, Immediate(kSmiTagMask));
-    }
-    deferred->Branch(not_zero);
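+    // Both operands must be smis before SmiDiv/SmiMod below; otherwise fall
+    // back to the deferred (generic) code.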
+    __ JumpIfNotBothSmi(left->reg(), right->reg(), deferred->entry_label());
 
-    // All operations on the smi values are on 32-bit registers, which are
-    // zero-extended into 64-bits by all 32-bit operations.
-    if (!left_is_in_rax) __ movl(rax, left->reg());
-    // Sign extend eax into edx:eax.
-    __ cdq();
-    // Check for 0 divisor.
-    __ testl(right->reg(), right->reg());
-    deferred->Branch(zero);
-    // Divide rdx:rax by the right operand.
-    __ idivl(right->reg());
-
-    // Complete the operation.
     if (op == Token::DIV) {
-      // Check for negative zero result.  If the result is zero, and the
-      // divisor is negative, return a floating point negative zero.
-      Label non_zero_result;
-      __ testl(left->reg(), left->reg());
-      __ j(not_zero, &non_zero_result);
-      __ testl(right->reg(), right->reg());
-      deferred->Branch(negative);
-      // The frame is identical on all paths reaching this label.
-      __ bind(&non_zero_result);
-      // Check for the corner case of dividing the most negative smi by
-      // -1. We cannot use the overflow flag, since it is not set by
-      // idiv instruction.
-      ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-      __ cmpl(rax, Immediate(0x40000000));
-      deferred->Branch(equal);
-      // Check that the remainder is zero.
-      __ testl(rdx, rdx);
-      deferred->Branch(not_zero);
-      // Tag the result and store it in the quotient register.
-      ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
-      __ lea(rax, Operand(rax, rax, times_1, kSmiTag));
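+      // SmiDiv leaves the quotient in rax; zero divisors, nonzero remainders,
+      // negative-zero results, and the most negative smi divided by -1 all take
+      // the deferred path.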
+      __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
       deferred->BindExit();
       left->Unuse();
       right->Unuse();
       frame_->Push(&quotient);
     } else {
       ASSERT(op == Token::MOD);
-      // Check for a negative zero result.  If the result is zero, and the
-      // dividend is negative, return a floating point negative zero.
-      Label non_zero_result;
-      __ testl(rdx, rdx);
-      __ j(not_zero, &non_zero_result);
-      __ testl(left->reg(), left->reg());
-      deferred->Branch(negative);
-      // The frame is identical on all paths reaching this label.
-      __ bind(&non_zero_result);
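+      // SmiMod leaves the remainder in rdx and takes the deferred path for a
+      // zero divisor or a negative-zero result.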
+      __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
       deferred->BindExit();
       left->Unuse();
       right->Unuse();
@@ -5730,59 +5646,30 @@
                                           overwrite_mode);
     __ movq(answer.reg(), left->reg());
     __ or_(answer.reg(), rcx);
-    __ testl(answer.reg(), Immediate(kSmiTagMask));
-    deferred->Branch(not_zero);
+    __ JumpIfNotSmi(answer.reg(), deferred->entry_label());
 
-    // Untag both operands.
-    __ movl(answer.reg(), left->reg());
-    __ sarl(answer.reg(), Immediate(kSmiTagSize));
-    __ sarl(rcx, Immediate(kSmiTagSize));
     // Perform the operation.
     switch (op) {
       case Token::SAR:
-        __ sarl(answer.reg());
-        // No checks of result necessary
+        __ SmiShiftArithmeticRight(answer.reg(), left->reg(), rcx);
         break;
       case Token::SHR: {
-        Label result_ok;
-        __ shrl(answer.reg());
-        // Check that the *unsigned* result fits in a smi.  Neither of
-        // the two high-order bits can be set:
-        //  * 0x80000000: high bit would be lost when smi tagging.
-        //  * 0x40000000: this number would convert to negative when smi
-        //    tagging.
-        // These two cases can only happen with shifts by 0 or 1 when
-        // handed a valid smi.  If the answer cannot be represented by a
-        // smi, restore the left and right arguments, and jump to slow
-        // case.  The low bit of the left argument may be lost, but only
-        // in a case where it is dropped anyway.
-        __ testl(answer.reg(), Immediate(0xc0000000));
-        __ j(zero, &result_ok);
-        ASSERT(kSmiTag == 0);
-        __ shl(rcx, Immediate(kSmiTagSize));
-        deferred->Jump();
-        __ bind(&result_ok);
+        __ SmiShiftLogicalRight(answer.reg(),
+                                left->reg(),
+                                rcx,
+                                deferred->entry_label());
         break;
       }
       case Token::SHL: {
-        Label result_ok;
-        __ shl(answer.reg());
-        // Check that the *signed* result fits in a smi.
-        __ cmpl(answer.reg(), Immediate(0xc0000000));
-        __ j(positive, &result_ok);
-        ASSERT(kSmiTag == 0);
-        __ shl(rcx, Immediate(kSmiTagSize));
-        deferred->Jump();
-        __ bind(&result_ok);
+        __ SmiShiftLeft(answer.reg(),
+                        left->reg(),
+                        rcx,
+                        deferred->entry_label());
         break;
       }
       default:
         UNREACHABLE();
     }
-    // Smi-tag the result in answer.
-    ASSERT(kSmiTagSize == 1);  // Adjust code if not the case.
-    __ lea(answer.reg(),
-           Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
     deferred->BindExit();
     left->Unuse();
     right->Unuse();
@@ -5806,63 +5693,41 @@
                                         left->reg(),
                                         right->reg(),
                                         overwrite_mode);
-  if (left->reg().is(right->reg())) {
-    __ testl(left->reg(), Immediate(kSmiTagMask));
-  } else {
-    __ movq(answer.reg(), left->reg());
-    __ or_(answer.reg(), right->reg());
-    ASSERT(kSmiTag == 0);  // Adjust test if not the case.
-    __ testl(answer.reg(), Immediate(kSmiTagMask));
-  }
-  deferred->Branch(not_zero);
-  __ movq(answer.reg(), left->reg());
+  __ JumpIfNotBothSmi(left->reg(), right->reg(), deferred->entry_label());
+
   switch (op) {
     case Token::ADD:
-      __ addl(answer.reg(), right->reg());
-      deferred->Branch(overflow);
+      __ SmiAdd(answer.reg(),
+                left->reg(),
+                right->reg(),
+                deferred->entry_label());
       break;
 
     case Token::SUB:
-      __ subl(answer.reg(), right->reg());
-      deferred->Branch(overflow);
+      __ SmiSub(answer.reg(),
+                left->reg(),
+                right->reg(),
+                deferred->entry_label());
       break;
 
     case Token::MUL: {
-      // If the smi tag is 0 we can just leave the tag on one operand.
-      ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
-      // Remove smi tag from the left operand (but keep sign).
-      // Left-hand operand has been copied into answer.
-      __ sarl(answer.reg(), Immediate(kSmiTagSize));
-      // Do multiplication of smis, leaving result in answer.
-      __ imull(answer.reg(), right->reg());
-      // Go slow on overflows.
-      deferred->Branch(overflow);
-      // Check for negative zero result.  If product is zero, and one
-      // argument is negative, go to slow case.  The frame is unchanged
-      // in this block, so local control flow can use a Label rather
-      // than a JumpTarget.
-      Label non_zero_result;
-      __ testl(answer.reg(), answer.reg());
-      __ j(not_zero, &non_zero_result);
-      __ movq(answer.reg(), left->reg());
-      __ or_(answer.reg(), right->reg());
-      deferred->Branch(negative);
-      __ xor_(answer.reg(), answer.reg());  // Positive 0 is correct.
-      __ bind(&non_zero_result);
+      __ SmiMul(answer.reg(),
+                left->reg(),
+                right->reg(),
+                deferred->entry_label());
       break;
     }
 
     case Token::BIT_OR:
-      __ or_(answer.reg(), right->reg());
+      __ SmiOr(answer.reg(), left->reg(), right->reg());
       break;
 
     case Token::BIT_AND:
-      __ and_(answer.reg(), right->reg());
+      __ SmiAnd(answer.reg(), left->reg(), right->reg());
       break;
 
     case Token::BIT_XOR:
-      ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
-      __ xor_(answer.reg(), right->reg());
+      __ SmiXor(answer.reg(), left->reg(), right->reg());
       break;
 
     default:
@@ -5973,8 +5838,7 @@
                                                GetName());
 
         // Check that the receiver is a heap object.
-        __ testl(receiver.reg(), Immediate(kSmiTagMask));
-        deferred->Branch(zero);
+        __ JumpIfSmi(receiver.reg(), deferred->entry_label());
 
         __ bind(deferred->patch_site());
         // This is the map check instruction that will be patched (so we can't
@@ -6046,8 +5910,7 @@
         // is not a load from the global context) and that it has the
         // expected map.
         if (!is_global) {
-          __ testl(receiver.reg(), Immediate(kSmiTagMask));
-          deferred->Branch(zero);
+          __ JumpIfSmi(receiver.reg(), deferred->entry_label());
         }
 
         // Initially, use an invalid map. The map is patched in the IC
@@ -6062,9 +5925,7 @@
         deferred->Branch(not_equal);
 
         // Check that the key is a non-negative smi.
-        __ testl(key.reg(),
-                 Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000u)));
-        deferred->Branch(not_zero);
+        __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
 
         // Get the elements array from the receiver and check that it
         // is not a dictionary.
@@ -6076,8 +5937,7 @@
 
         // Shift the key to get the actual index value and check that
         // it is within bounds.
-        __ movl(index.reg(), key.reg());
-        __ shrl(index.reg(), Immediate(kSmiTagSize));
+        __ SmiToInteger32(index.reg(), key.reg());
         __ cmpl(index.reg(),
                 FieldOperand(elements.reg(), FixedArray::kLengthOffset));
         deferred->Branch(above_equal);
@@ -6228,20 +6088,16 @@
         // Check that the value is a smi if it is not a constant.
         // We can skip the write barrier for smis and constants.
         if (!value_is_constant) {
-          __ testl(value.reg(), Immediate(kSmiTagMask));
-          deferred->Branch(not_zero);
+          __ JumpIfNotSmi(value.reg(), deferred->entry_label());
         }
 
         // Check that the key is a non-negative smi.
-        __ testl(key.reg(),
-                 Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000U)));
-        deferred->Branch(not_zero);
+        __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
         // Ensure that the smi is zero-extended.  This is not guaranteed.
         __ movl(key.reg(), key.reg());
 
         // Check that the receiver is not a smi.
-        __ testl(receiver.reg(), Immediate(kSmiTagMask));
-        deferred->Branch(zero);
+        __ JumpIfSmi(receiver.reg(), deferred->entry_label());
 
         // Check that the receiver is a JSArray.
         __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
@@ -6272,11 +6128,11 @@
         deferred->Branch(not_equal);
 
         // Store the value.
-        ASSERT_EQ(1, kSmiTagSize);
-        ASSERT_EQ(0, kSmiTag);
-        __ movq(Operand(tmp.reg(),
-                        key.reg(),
-                        times_half_pointer_size,
+        SmiIndex index =
+            masm->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
+        __ movq(Operand(tmp.reg(),
+                        index.reg,
+                        index.scale,
                         FixedArray::kHeaderSize - kHeapObjectTag),
                 value.reg());
         __ IncrementCounter(&Counters::keyed_store_inline, 1);
@@ -6457,8 +6313,7 @@
   Label try_float;
   Label special;
   // Check whether the value is a smi.
-  __ testl(rax, Immediate(kSmiTagMask));
-  __ j(not_zero, &try_float);
+  __ JumpIfNotSmi(rax, &try_float);
 
   // Enter runtime system if the value of the smi is zero
   // to make sure that we switch between 0 and -0.
@@ -6567,23 +6422,7 @@
       // be equal if the other is a HeapNumber. If so, use the slow case.
       {
         Label not_smis;
-        ASSERT_EQ(0, kSmiTag);
-        ASSERT_EQ(0, Smi::FromInt(0));
-        __ movq(rcx, Immediate(kSmiTagMask));
-        __ and_(rcx, rax);
-        __ testq(rcx, rdx);
-        __ j(not_zero, &not_smis);
-        // One operand is a smi.
-
-        // Check whether the non-smi is a heap number.
-        ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
-        // rcx still holds rax & kSmiTag, which is either zero or one.
-        __ decq(rcx);  // If rax is a smi, all 1s, else all 0s.
-        __ movq(rbx, rdx);
-        __ xor_(rbx, rax);
-        __ and_(rbx, rcx);  // rbx holds either 0 or rax ^ rdx.
-        __ xor_(rbx, rax);
-        // if rax was smi, rbx is now rdx, else rax.
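+        // SelectNonSmi moves whichever of rax/rdx is not a smi into rbx and
+        // jumps to not_smis when both are heap objects.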
+        __ SelectNonSmi(rbx, rax, rdx, &not_smis);
 
         // Check if the non-smi operand is a heap number.
         __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
@@ -6712,8 +6551,7 @@
                                     Label* label,
                                     Register object,
                                     Register scratch) {
-  __ testl(object, Immediate(kSmiTagMask));
-  __ j(zero, label);
+  __ JumpIfSmi(object, label);
   __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
   __ movzxbq(scratch,
              FieldOperand(scratch, Map::kInstanceTypeOffset));
@@ -6757,8 +6595,7 @@
   // Get the object - go slow case if it's a smi.
   Label slow;
   __ movq(rax, Operand(rsp, 2 * kPointerSize));
-  __ testl(rax, Immediate(kSmiTagMask));
-  __ j(zero, &slow);
+  __ JumpIfSmi(rax, &slow);
 
   // Check that the left hand is a JS object. Leave its map in rax.
   __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
@@ -6771,8 +6608,7 @@
   __ TryGetFunctionPrototype(rdx, rbx, &slow);
 
   // Check that the function prototype is a JS object.
-  __ testl(rbx, Immediate(kSmiTagMask));
-  __ j(zero, &slow);
+  __ JumpIfSmi(rbx, &slow);
   __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
   __ j(below, &slow);
   __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
@@ -6824,7 +6660,8 @@
   // Patch the arguments.length and the parameters pointer.
   __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
   __ movq(Operand(rsp, 1 * kPointerSize), rcx);
-  __ lea(rdx, Operand(rdx, rcx, times_4, kDisplacement));
+  SmiIndex index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
+  __ lea(rdx, Operand(rdx, index.reg, index.scale, kDisplacement));
   __ movq(Operand(rsp, 2 * kPointerSize), rdx);
 
   // Do the runtime call to allocate the arguments object.
@@ -6844,8 +6681,7 @@
 
   // Check that the key is a smi.
   Label slow;
-  __ testl(rdx, Immediate(kSmiTagMask));
-  __ j(not_zero, &slow);
+  __ JumpIfNotSmi(rdx, &slow);
 
   // Check if the calling frame is an arguments adaptor frame.
   Label adaptor;
@@ -6861,12 +6697,10 @@
   __ j(above_equal, &slow);
 
   // Read the argument from the stack and return it.
-  // Shifting code depends on SmiEncoding being equivalent to left shift:
-  // we multiply by four to get pointer alignment.
-  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-  __ lea(rbx, Operand(rbp, rax, times_4, 0));
-  __ neg(rdx);
-  __ movq(rax, Operand(rbx, rdx, times_4, kDisplacement));
+  SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
+  __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
+  index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
+  __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
   __ Ret();
 
   // Arguments adaptor case: Check index against actual arguments
@@ -6878,12 +6712,10 @@
   __ j(above_equal, &slow);
 
   // Read the argument from the stack and return it.
-  // Shifting code depends on SmiEncoding being equivalent to left shift:
-  // we multiply by four to get pointer alignment.
-  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-  __ lea(rbx, Operand(rbx, rcx, times_4, 0));
-  __ neg(rdx);
-  __ movq(rax, Operand(rbx, rdx, times_4, kDisplacement));
+  index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
+  __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
+  index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
+  __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
   __ Ret();
 
   // Slow-case: Handle non-smi or out-of-bounds access to arguments
@@ -7139,8 +6971,7 @@
   __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
 
   // Check that the function really is a JavaScript function.
-  __ testl(rdi, Immediate(kSmiTagMask));
-  __ j(zero, &slow);
+  __ JumpIfSmi(rdi, &slow);
   // Goto slow case if we do not have a function.
   __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
   __ j(not_equal, &slow);
@@ -7390,13 +7221,12 @@
                                            Register number) {
   Label load_smi, done;
 
-  __ testl(number, Immediate(kSmiTagMask));
-  __ j(zero, &load_smi);
+  __ JumpIfSmi(number, &load_smi);
   __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
   __ jmp(&done);
 
   __ bind(&load_smi);
-  __ sarl(number, Immediate(kSmiTagSize));
+  __ SmiToInteger32(number, number);
   __ push(number);
   __ fild_s(Operand(rsp, 0));
   __ pop(number);
@@ -7410,13 +7240,12 @@
                                            XMMRegister dst) {
   Label load_smi, done;
 
-  __ testl(src, Immediate(kSmiTagMask));
-  __ j(zero, &load_smi);
+  __ JumpIfSmi(src, &load_smi);
   __ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
   __ jmp(&done);
 
   __ bind(&load_smi);
-  __ sarl(src, Immediate(kSmiTagSize));
+  __ SmiToInteger32(src, src);
   __ cvtlsi2sd(dst, src);
 
   __ bind(&done);
@@ -7445,26 +7274,24 @@
 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm) {
   Label load_smi_1, load_smi_2, done_load_1, done;
   __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
-  __ testl(kScratchRegister, Immediate(kSmiTagMask));
-  __ j(zero, &load_smi_1);
+  __ JumpIfSmi(kScratchRegister, &load_smi_1);
   __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
   __ bind(&done_load_1);
 
   __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
-  __ testl(kScratchRegister, Immediate(kSmiTagMask));
-  __ j(zero, &load_smi_2);
+  __ JumpIfSmi(kScratchRegister, &load_smi_2);
   __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
   __ jmp(&done);
 
   __ bind(&load_smi_1);
-  __ sarl(kScratchRegister, Immediate(kSmiTagSize));
+  __ SmiToInteger32(kScratchRegister, kScratchRegister);
   __ push(kScratchRegister);
   __ fild_s(Operand(rsp, 0));
   __ pop(kScratchRegister);
   __ jmp(&done_load_1);
 
   __ bind(&load_smi_2);
-  __ sarl(kScratchRegister, Immediate(kSmiTagSize));
+  __ SmiToInteger32(kScratchRegister, kScratchRegister);
   __ push(kScratchRegister);
   __ fild_s(Operand(rsp, 0));
   __ pop(kScratchRegister);
@@ -7477,29 +7304,23 @@
                                             Register lhs,
                                             Register rhs) {
   Label load_smi_lhs, load_smi_rhs, done_load_lhs, done;
-  __ testl(lhs, Immediate(kSmiTagMask));
-  __ j(zero, &load_smi_lhs);
+  __ JumpIfSmi(lhs, &load_smi_lhs);
   __ fld_d(FieldOperand(lhs, HeapNumber::kValueOffset));
   __ bind(&done_load_lhs);
 
-  __ testl(rhs, Immediate(kSmiTagMask));
-  __ j(zero, &load_smi_rhs);
+  __ JumpIfSmi(rhs, &load_smi_rhs);
   __ fld_d(FieldOperand(rhs, HeapNumber::kValueOffset));
   __ jmp(&done);
 
   __ bind(&load_smi_lhs);
-  ASSERT(kSmiTagSize == 1);
-  ASSERT(kSmiTag == 0);
-  __ movsxlq(kScratchRegister, lhs);
-  __ sar(kScratchRegister, Immediate(kSmiTagSize));
+  __ SmiToInteger64(kScratchRegister, lhs);
   __ push(kScratchRegister);
   __ fild_d(Operand(rsp, 0));
   __ pop(kScratchRegister);
   __ jmp(&done_load_lhs);
 
   __ bind(&load_smi_rhs);
-  __ movsxlq(kScratchRegister, rhs);
-  __ sar(kScratchRegister, Immediate(kSmiTagSize));
+  __ SmiToInteger64(kScratchRegister, rhs);
   __ push(kScratchRegister);
   __ fild_d(Operand(rsp, 0));
   __ pop(kScratchRegister);
@@ -7513,14 +7334,12 @@
   Label test_other, done;
   // Test if both operands are numbers (heap_numbers or smis).
   // If not, jump to label non_float.
-  __ testl(rdx, Immediate(kSmiTagMask));
-  __ j(zero, &test_other);  // argument in rdx is OK
+  __ JumpIfSmi(rdx, &test_other);  // argument in rdx is OK
   __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), Factory::heap_number_map());
   __ j(not_equal, non_float);  // The argument in rdx is not a number.
 
   __ bind(&test_other);
-  __ testl(rax, Immediate(kSmiTagMask));
-  __ j(zero, &done);  // argument in rax is OK
+  __ JumpIfSmi(rax, &done);  // argument in rax is OK
   __ Cmp(FieldOperand(rax, HeapObject::kMapOffset), Factory::heap_number_map());
   __ j(not_equal, non_float);  // The argument in rax is not a number.
 
@@ -7551,88 +7370,41 @@
   // leave result in register rax.
 
   // Smi check both operands.
-  __ movq(rcx, rbx);
-  __ or_(rcx, rax);  // The value in ecx is used for negative zero test later.
-  __ testl(rcx, Immediate(kSmiTagMask));
-  __ j(not_zero, slow);
+  __ JumpIfNotBothSmi(rax, rbx, slow);
 
   switch (op_) {
     case Token::ADD: {
-      __ addl(rax, rbx);
-      __ j(overflow, slow);  // The slow case rereads operands from the stack.
+      __ SmiAdd(rax, rax, rbx, slow);
       break;
     }
 
     case Token::SUB: {
-      __ subl(rax, rbx);
-      __ j(overflow, slow);  // The slow case rereads operands from the stack.
+      __ SmiSub(rax, rax, rbx, slow);
       break;
     }
 
     case Token::MUL:
-      // If the smi tag is 0 we can just leave the tag on one operand.
-      ASSERT(kSmiTag == 0);  // adjust code below if not the case
-      // Remove tag from one of the operands (but keep sign).
-      __ sarl(rax, Immediate(kSmiTagSize));
-      // Do multiplication.
-      __ imull(rax, rbx);  // multiplication of smis; result in eax
-      // Go slow on overflows.
-      __ j(overflow, slow);
-      // Check for negative zero result.
-      __ NegativeZeroTest(rax, rcx, slow);  // ecx (not rcx) holds x | y.
+      __ SmiMul(rax, rax, rbx, slow);
       break;
 
     case Token::DIV:
-      // Sign extend eax into edx:eax.
-      __ cdq();
-      // Check for 0 divisor.
-      __ testl(rbx, rbx);
-      __ j(zero, slow);
-      // Divide edx:eax by ebx (where edx:eax is equivalent to the smi in eax).
-      __ idivl(rbx);
-      // Check that the remainder is zero.
-      __ testl(rdx, rdx);
-      __ j(not_zero, slow);
-      // Check for the corner case of dividing the most negative smi
-      // by -1. We cannot use the overflow flag, since it is not set
-      // by idiv instruction.
-      ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-      // TODO(X64): TODO(Smi): Smi implementation dependent constant.
-      // Value is Smi::fromInt(-(1<<31)) / Smi::fromInt(-1)
-      __ cmpl(rax, Immediate(0x40000000));
-      __ j(equal, slow);
-      // Check for negative zero result.
-      __ NegativeZeroTest(rax, rcx, slow);  // ecx (not rcx) holds x | y.
-      // Tag the result and store it in register rax.
-      ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
-      __ lea(rax, Operand(rax, rax, times_1, kSmiTag));
+      __ SmiDiv(rax, rax, rbx, slow);
       break;
 
     case Token::MOD:
-      // Sign extend eax into edx:eax
-      __ cdq();
-      // Check for 0 divisor.
-      __ testl(rbx, rbx);
-      __ j(zero, slow);
-      // Divide edx:eax by ebx.
-      __ idivl(rbx);
-      // Check for negative zero result.
-      __ NegativeZeroTest(rdx, rcx, slow);  // ecx (not rcx) holds x | y.
-      // Move remainder to register rax.
-      __ movl(rax, rdx);
+      __ SmiMod(rax, rax, rbx, slow);
       break;
 
     case Token::BIT_OR:
-      __ or_(rax, rbx);
+      __ SmiOr(rax, rax, rbx);
       break;
 
     case Token::BIT_AND:
-      __ and_(rax, rbx);
+      __ SmiAnd(rax, rax, rbx);
       break;
 
     case Token::BIT_XOR:
-      ASSERT_EQ(0, kSmiTag);
-      __ xor_(rax, rbx);
+      __ SmiXor(rax, rax, rbx);
       break;
 
     case Token::SHL:
@@ -7640,41 +7412,20 @@
     case Token::SAR:
       // Move the second operand into register ecx.
       __ movl(rcx, rbx);
-      // Remove tags from operands (but keep sign).
-      __ sarl(rax, Immediate(kSmiTagSize));
-      __ sarl(rcx, Immediate(kSmiTagSize));
       // Perform the operation.
       switch (op_) {
         case Token::SAR:
-          __ sarl(rax);
-          // No checks of result necessary
+          __ SmiShiftArithmeticRight(rax, rax, rbx);
           break;
         case Token::SHR:
-          __ shrl(rax);  // rcx is implicit shift register
-          // Check that the *unsigned* result fits in a smi.
-          // Neither of the two high-order bits can be set:
-          // - 0x80000000: high bit would be lost when smi tagging.
-          // - 0x40000000: this number would convert to negative when
-          // Smi tagging these two cases can only happen with shifts
-          // by 0 or 1 when handed a valid smi.
-          __ testl(rax, Immediate(0xc0000000));
-          __ j(not_zero, slow);
+          __ SmiShiftLogicalRight(rax, rax, rbx, slow);
           break;
         case Token::SHL:
-          __ shll(rax);
-          // Check that the *signed* result fits in a smi.
-          // It does, if the 30th and 31st bits are equal, since then
-          // shifting the SmiTag in at the bottom doesn't change the sign.
-          ASSERT(kSmiTagSize == 1);
-          __ cmpl(rax, Immediate(0xc0000000));
-          __ j(sign, slow);
+          __ SmiShiftLeft(rax, rax, rbx, slow);
           break;
         default:
           UNREACHABLE();
       }
-      // Tag the result and store it in register eax.
-      ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
-      __ lea(rax, Operand(rax, rax, times_1, kSmiTag));
       break;
 
     default:
@@ -7722,8 +7473,7 @@
         case OVERWRITE_RIGHT:
           // If the argument in rax is already an object, we skip the
           // allocation of a heap number.
-          __ testl(rax, Immediate(kSmiTagMask));
-          __ j(not_zero, &skip_allocation);
+          __ JumpIfNotSmi(rax, &skip_allocation);
           // Fall through!
         case NO_OVERWRITE:
           FloatingPointHelper::AllocateHeapNumber(masm,
@@ -7829,8 +7579,7 @@
         __ j(negative, &non_smi_result);
       }
       // Tag smi result and return.
-      ASSERT(kSmiTagSize == 1);  // adjust code if not the case
-      __ lea(rax, Operand(rax, rax, times_1, kSmiTag));
+      __ Integer32ToSmi(rax, rax);
       __ ret(2 * kPointerSize);
 
       // All ops except SHR return a signed int32 that we load in a HeapNumber.
@@ -7845,8 +7594,7 @@
             // allocation of a heap number.
             __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
                                  1 * kPointerSize : 2 * kPointerSize));
-            __ testl(rax, Immediate(kSmiTagMask));
-            __ j(not_zero, &skip_allocation);
+            __ JumpIfNotSmi(rax, &skip_allocation);
             // Fall through!
           case NO_OVERWRITE:
             FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index bf415d9..b2f52b2 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -95,7 +95,7 @@
       StringDictionary::kHeaderSize +
       StringDictionary::kCapacityIndex * kPointerSize;
   __ movq(r2, FieldOperand(r0, kCapacityOffset));
-  __ shrl(r2, Immediate(kSmiTagSize));  // convert smi to int
+  __ SmiToInteger32(r2, r2);
   __ decl(r2);
 
   // Generate an unrolled loop that performs a few probes before
@@ -132,7 +132,7 @@
   __ bind(&done);
   const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
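+  // The details word is stored as a smi, so the type mask is compared in its
+  // smi-encoded form (the raw mask shifted left by the smi tag size).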
   __ testl(Operand(r0, r1, times_pointer_size, kDetailsOffset - kHeapObjectTag),
-           Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
+           Immediate(Smi::FromInt(PropertyDetails::TypeField::mask())));
   __ j(not_zero, miss_label);
 
   // Get the value at the masked, scaled index.
@@ -148,8 +148,7 @@
                                            Register value) {
   Label done;
   // Check if the value is a Smi.
-  __ testl(value, Immediate(kSmiTagMask));
-  __ j(zero, &done);
+  __ JumpIfSmi(value, &done);
   // Check if the object has been loaded.
   __ movq(kScratchRegister, FieldOperand(value, JSFunction::kMapOffset));
   __ testb(FieldOperand(kScratchRegister, Map::kBitField2Offset),
@@ -167,7 +166,7 @@
   // Arguments are address of start of call sequence that called
   // the IC,
   Address test_instruction_address =
-      address + Assembler::kPatchReturnSequenceLength;
+      address + Assembler::kCallTargetAddressOffset;
   // The keyed load has a fast inlined case if the IC call instruction
   // is immediately followed by a test instruction.
   if (*test_instruction_address != kTestEaxByte) return false;
@@ -265,8 +264,7 @@
   __ movq(rcx, Operand(rsp, 2 * kPointerSize));
 
   // Check that the object isn't a smi.
-  __ testl(rcx, Immediate(kSmiTagMask));
-  __ j(zero, &slow);
+  __ JumpIfSmi(rcx, &slow);
 
   // Check that the object is some kind of JS object EXCEPT JS Value type.
   // In the case that the object is a value-wrapper object,
@@ -283,9 +281,8 @@
   __ j(not_zero, &slow);
 
   // Check that the key is a smi.
-  __ testl(rax, Immediate(kSmiTagMask));
-  __ j(not_zero, &check_string);
-  __ sarl(rax, Immediate(kSmiTagSize));
+  __ JumpIfNotSmi(rax, &check_string);
+  __ SmiToInteger32(rax, rax);
   // Get the elements array of the object.
   __ bind(&index_int);
   __ movq(rcx, FieldOperand(rcx, JSObject::kElementsOffset));
@@ -410,8 +407,7 @@
   // Get the receiver from the stack.
   __ movq(rdx, Operand(rsp, 2 * kPointerSize));  // 2 ~ return address, key
   // Check that the object isn't a smi.
-  __ testl(rdx, Immediate(kSmiTagMask));
-  __ j(zero, &slow);
+  __ JumpIfSmi(rdx, &slow);
   // Get the map from the receiver.
   __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
   // Check that the receiver does not require access checks.  We need
@@ -422,8 +418,7 @@
   // Get the key from the stack.
   __ movq(rbx, Operand(rsp, 1 * kPointerSize));  // 1 ~ return address
   // Check that the key is a smi.
-  __ testl(rbx, Immediate(kSmiTagMask));
-  __ j(not_zero, &slow);
+  __ JumpIfNotSmi(rbx, &slow);
   // If it is a smi, make sure it is zero-extended, so it can be
   // used as an index in a memory operand.
   __ movl(rbx, rbx);  // Clear the high bits of rbx.
@@ -443,8 +438,7 @@
   __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map());
   __ j(not_equal, &slow);
   // Untag the key (for checking against untagged length in the fixed array).
-  __ movl(rdx, rbx);
-  __ sarl(rdx, Immediate(kSmiTagSize));
+  __ SmiToInteger32(rdx, rbx);
   __ cmpl(rdx, FieldOperand(rcx, Array::kLengthOffset));
   // rax: value
   // rcx: FixedArray
@@ -473,13 +467,13 @@
   // rbx: index (as a smi)
   // flags: compare (rbx, rdx.length())
   __ j(not_equal, &slow);  // do not leave holes in the array
-  __ sarl(rbx, Immediate(kSmiTagSize));  // untag
+  __ SmiToInteger64(rbx, rbx);
   __ cmpl(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
   __ j(above_equal, &slow);
-  // Restore tag and increment.
-  __ lea(rbx, Operand(rbx, rbx, times_1, 1 << kSmiTagSize));
+  // Increment and restore smi-tag.
+  __ Integer64AddToSmi(rbx, rbx, 1);
   __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rbx);
-  __ subl(rbx, Immediate(1 << kSmiTagSize));  // decrement rbx again
+  __ SmiSubConstant(rbx, rbx, 1, NULL);
   __ jmp(&fast);
 
 
@@ -544,8 +538,7 @@
   // Check if the receiver is a global object of some sort.
   Label invoke, global;
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));  // receiver
-  __ testl(rdx, Immediate(kSmiTagMask));
-  __ j(zero, &invoke);
+  __ JumpIfSmi(rdx, &invoke);
   __ CmpObjectType(rdx, JS_GLOBAL_OBJECT_TYPE, rcx);
   __ j(equal, &global);
   __ CmpInstanceType(rcx, JS_BUILTINS_OBJECT_TYPE);
@@ -594,8 +587,7 @@
   // to probe.
   //
   // Check for number.
-  __ testl(rdx, Immediate(kSmiTagMask));
-  __ j(zero, &number);
+  __ JumpIfSmi(rdx, &number);
   __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rbx);
   __ j(not_equal, &non_number);
   __ bind(&number);
@@ -640,8 +632,7 @@
 
   // Move the result to register rdi and check that it isn't a smi.
   __ movq(rdi, rdx);
-  __ testl(rdx, Immediate(kSmiTagMask));
-  __ j(zero, miss);
+  __ JumpIfSmi(rdx, miss);
 
   // Check that the value is a JavaScript function.
   __ CmpObjectType(rdx, JS_FUNCTION_TYPE, rdx);
@@ -683,8 +674,7 @@
   __ movq(rcx, Operand(rsp, (argc + 2) * kPointerSize));
 
   // Check that the receiver isn't a smi.
-  __ testl(rdx, Immediate(kSmiTagMask));
-  __ j(zero, &miss);
+  __ JumpIfSmi(rdx, &miss);
 
   // Check that the receiver is a valid JS object.
   // Because there are so many map checks and type checks, do not
@@ -844,8 +834,7 @@
   __ movq(rax, Operand(rsp, kPointerSize));
 
   // Check that the receiver isn't a smi.
-  __ testl(rax, Immediate(kSmiTagMask));
-  __ j(zero, &miss);
+  __ JumpIfSmi(rax, &miss);
 
   // Check that the receiver is a valid JS object.
   __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
@@ -902,7 +891,7 @@
 bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
   // The address of the instruction following the call.
   Address test_instruction_address =
-      address + Assembler::kPatchReturnSequenceLength;
+      address + Assembler::kCallTargetAddressOffset;
   // If the instruction following the call is not a test eax, nothing
   // was inlined.
   if (*test_instruction_address != kTestEaxByte) return false;
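In the keyed-store fast case above, the length bump happens entirely on tagged values. After SmiToInteger64 the key register holds the untagged index n, Integer64AddToSmi(rbx, rbx, 1) computes n + n + (1 << kSmiTagSize) = 2(n + 1), i.e. the smi for n + 1, and SmiSubConstant then subtracts Smi::FromInt(1) (bit pattern 2 under this encoding) to recover the smi for n. A small worked example of that arithmetic, for illustration only:

    // Illustrative arithmetic only (not V8 code): the tagged length update
    // in the KeyedStoreIC fast case, for an untagged index n.
    #include <cassert>

    int main() {
      const int kSmiTagSize = 1;
      int n = 5;                                    // untagged index (after SmiToInteger64)
      int new_length = n + n + (1 << kSmiTagSize);  // Integer64AddToSmi(rbx, rbx, 1)
      assert(new_length == 2 * (n + 1));            // tagged value of n + 1
      int index = new_length - 2;                   // SmiSubConstant(rbx, rbx, 1): Smi::FromInt(1) == 2
      assert(index == 2 * n);                       // tagged value of n again
      return 0;
    }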
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 104ccb8..637428d 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -412,6 +412,729 @@
 }
 
 
+// ----------------------------------------------------------------------------
+// Smi tagging, untagging and tag detection.
+
+
+void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
+  ASSERT_EQ(1, kSmiTagSize);
+  ASSERT_EQ(0, kSmiTag);
+#ifdef DEBUG
+    cmpq(src, Immediate(0xC0000000u));
+    Check(positive, "Smi conversion overflow");
+#endif
+  if (dst.is(src)) {
+    addl(dst, src);
+  } else {
+    lea(dst, Operand(src, src, times_1, 0));
+  }
+}
+
+
+void MacroAssembler::Integer32ToSmi(Register dst,
+                                    Register src,
+                                    Label* on_overflow) {
+  ASSERT_EQ(1, kSmiTagSize);
+  ASSERT_EQ(0, kSmiTag);
+  if (!dst.is(src)) {
+    movl(dst, src);
+  }
+  addl(dst, src);
+  j(overflow, on_overflow);
+}
+
+
+void MacroAssembler::Integer64AddToSmi(Register dst,
+                                       Register src,
+                                       int constant) {
+#ifdef DEBUG
+  movl(kScratchRegister, src);
+  addl(kScratchRegister, Immediate(constant));
+  Check(no_overflow, "Add-and-smi-convert overflow");
+  Condition valid = CheckInteger32ValidSmiValue(kScratchRegister);
+  Check(valid, "Add-and-smi-convert overflow");
+#endif
+  lea(dst, Operand(src, src, times_1, constant << kSmiTagSize));
+}
+
+
+void MacroAssembler::SmiToInteger32(Register dst, Register src) {
+  ASSERT_EQ(1, kSmiTagSize);
+  ASSERT_EQ(0, kSmiTag);
+  if (!dst.is(src)) {
+    movl(dst, src);
+  }
+  sarl(dst, Immediate(kSmiTagSize));
+}
+
+
+void MacroAssembler::SmiToInteger64(Register dst, Register src) {
+  ASSERT_EQ(1, kSmiTagSize);
+  ASSERT_EQ(0, kSmiTag);
+  movsxlq(dst, src);
+  sar(dst, Immediate(kSmiTagSize));
+}
+
+
+void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
+                                                           Register src,
+                                                           int power) {
+  ASSERT(power >= 0);
+  ASSERT(power < 64);
+  if (power == 0) {
+    SmiToInteger64(dst, src);
+    return;
+  }
+  movsxlq(dst, src);
+  shl(dst, Immediate(power - 1));
+}
+
+void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) {
+  ASSERT_EQ(0, kSmiTag);
+  testl(src, Immediate(kSmiTagMask));
+  j(zero, on_smi);
+}
+
+
+void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) {
+  Condition not_smi = CheckNotSmi(src);
+  j(not_smi, on_not_smi);
+}
+
+
+void MacroAssembler::JumpIfNotPositiveSmi(Register src,
+                                          Label* on_not_positive_smi) {
+  Condition not_positive_smi = CheckNotPositiveSmi(src);
+  j(not_positive_smi, on_not_positive_smi);
+}
+
+
+void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
+                                             int constant,
+                                             Label* on_equals) {
+  if (Smi::IsValid(constant)) {
+    Condition are_equal = CheckSmiEqualsConstant(src, constant);
+    j(are_equal, on_equals);
+  }
+}
+
+
+void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
+  Condition is_valid = CheckInteger32ValidSmiValue(src);
+  j(ReverseCondition(is_valid), on_invalid);
+}
+
+
+
+void MacroAssembler::JumpIfNotBothSmi(Register src1,
+                                      Register src2,
+                                      Label* on_not_both_smi) {
+  Condition not_both_smi = CheckNotBothSmi(src1, src2);
+  j(not_both_smi, on_not_both_smi);
+}
+
+Condition MacroAssembler::CheckSmi(Register src) {
+  testb(src, Immediate(kSmiTagMask));
+  return zero;
+}
+
+
+Condition MacroAssembler::CheckNotSmi(Register src) {
+  ASSERT_EQ(0, kSmiTag);
+  testb(src, Immediate(kSmiTagMask));
+  return not_zero;
+}
+
+
+Condition MacroAssembler::CheckPositiveSmi(Register src) {
+  ASSERT_EQ(0, kSmiTag);
+  testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask)));
+  return zero;
+}
+
+
+Condition MacroAssembler::CheckNotPositiveSmi(Register src) {
+  ASSERT_EQ(0, kSmiTag);
+  testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask)));
+  return not_zero;
+}
+
+
+Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
+  if (first.is(second)) {
+    return CheckSmi(first);
+  }
+  movl(kScratchRegister, first);
+  orl(kScratchRegister, second);
+  return CheckSmi(kScratchRegister);
+}
+
+
+Condition MacroAssembler::CheckNotBothSmi(Register first, Register second) {
+  ASSERT_EQ(0, kSmiTag);
+  if (first.is(second)) {
+    return CheckNotSmi(first);
+  }
+  movl(kScratchRegister, first);
+  or_(kScratchRegister, second);
+  return CheckNotSmi(kScratchRegister);
+}
+
+
+Condition MacroAssembler::CheckIsMinSmi(Register src) {
+  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  cmpl(src, Immediate(0x40000000));
+  return equal;
+}
+
+Condition MacroAssembler::CheckSmiEqualsConstant(Register src, int constant) {
+  if (constant == 0) {
+    testl(src, src);
+    return zero;
+  }
+  if (Smi::IsValid(constant)) {
+    cmpl(src, Immediate(Smi::FromInt(constant)));
+    return zero;
+  }
+  // Can't be equal.
+  UNREACHABLE();
+  return no_condition;
+}
+
+
+Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
+  // A 32-bit integer value can be converted to a smi if it is in the
+  // range [-2^30 .. 2^30-1]. That is equivalent to having its 32-bit
+  // representation have bits 30 and 31 be equal.
+  cmpl(src, Immediate(0xC0000000u));
+  return positive;
+}
+
+
+void MacroAssembler::SmiNeg(Register dst,
+                            Register src,
+                            Label* on_not_smi_result) {
+  if (!dst.is(src)) {
+    movl(dst, src);
+  }
+  negl(dst);
+  testl(dst, Immediate(0x7fffffff));
+  // If the result is zero or 0x80000000, negation failed to create a smi.
+  j(equal, on_not_smi_result);
+}
+
+
+void MacroAssembler::SmiAdd(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result) {
+  ASSERT(!dst.is(src2));
+  if (!dst.is(src1)) {
+    movl(dst, src1);
+  }
+  addl(dst, src2);
+  if (!dst.is(src1)) {
+    j(overflow, on_not_smi_result);
+  } else {
+    Label smi_result;
+    j(no_overflow, &smi_result);
+    // Restore src1.
+    subl(src1, src2);
+    jmp(on_not_smi_result);
+    bind(&smi_result);
+  }
+}
+
+
+
+void MacroAssembler::SmiSub(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result) {
+  ASSERT(!dst.is(src2));
+  if (!dst.is(src1)) {
+    movl(dst, src1);
+  }
+  subl(dst, src2);
+  if (!dst.is(src1)) {
+    j(overflow, on_not_smi_result);
+  } else {
+    Label smi_result;
+    j(no_overflow, &smi_result);
+    // Restore src1.
+    addl(src1, src2);
+    jmp(on_not_smi_result);
+    bind(&smi_result);
+  }
+}
+
+
+void MacroAssembler::SmiMul(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result) {
+  ASSERT(!dst.is(src2));
+
+  if (dst.is(src1)) {
+    movq(kScratchRegister, src1);
+  }
+  SmiToInteger32(dst, src1);
+
+  imull(dst, src2);
+  j(overflow, on_not_smi_result);
+
+  // Check for negative zero result.  If product is zero, and one
+  // argument is negative, go to slow case.  The frame is unchanged
+  // in this block, so local control flow can use a Label rather
+  // than a JumpTarget.
+  Label non_zero_result;
+  testl(dst, dst);
+  j(not_zero, &non_zero_result);
+
+  // Test whether either operand is negative (the other must be zero).
+  orl(kScratchRegister, src2);
+  j(negative, on_not_smi_result);
+  bind(&non_zero_result);
+}
+
+
+void MacroAssembler::SmiTryAddConstant(Register dst,
+                                       Register src,
+                                       int32_t constant,
+                                       Label* on_not_smi_result) {
+  // Does not assume that src is a smi.
+  ASSERT_EQ(1, kSmiTagMask);
+  ASSERT_EQ(0, kSmiTag);
+  ASSERT(Smi::IsValid(constant));
+
+  Register tmp = (src.is(dst) ? kScratchRegister : dst);
+  movl(tmp, src);
+  addl(tmp, Immediate(Smi::FromInt(constant)));
+  if (tmp.is(kScratchRegister)) {
+    j(overflow, on_not_smi_result);
+    testl(tmp, Immediate(kSmiTagMask));
+    j(not_zero, on_not_smi_result);
+    movl(dst, tmp);
+  } else {
+    movl(kScratchRegister, Immediate(kSmiTagMask));
+    cmovl(overflow, dst, kScratchRegister);
+    testl(dst, kScratchRegister);
+    j(not_zero, on_not_smi_result);
+  }
+}
+
+
+void MacroAssembler::SmiAddConstant(Register dst,
+                                    Register src,
+                                    int32_t constant,
+                                    Label* on_not_smi_result) {
+  ASSERT(Smi::IsValid(constant));
+  if (on_not_smi_result == NULL) {
+    if (dst.is(src)) {
+      movl(dst, src);
+    } else {
+      lea(dst, Operand(src, constant << kSmiTagSize));
+    }
+  } else {
+    if (!dst.is(src)) {
+      movl(dst, src);
+    }
+    addl(dst, Immediate(Smi::FromInt(constant)));
+    if (!dst.is(src)) {
+      j(overflow, on_not_smi_result);
+    } else {
+      Label result_ok;
+      j(no_overflow, &result_ok);
+      subl(dst, Immediate(Smi::FromInt(constant)));
+      jmp(on_not_smi_result);
+      bind(&result_ok);
+    }
+  }
+}
+
+
+void MacroAssembler::SmiSubConstant(Register dst,
+                                    Register src,
+                                    int32_t constant,
+                                    Label* on_not_smi_result) {
+  ASSERT(Smi::IsValid(constant));
+  Smi* smi_value = Smi::FromInt(constant);
+  if (dst.is(src)) {
+    // Optimistic subtract - may change value of dst register,
+    // if it has garbage bits in the higher half, but will not change
+    // the value as a tagged smi.
+    subl(dst, Immediate(smi_value));
+    if (on_not_smi_result != NULL) {
+      Label add_success;
+      j(no_overflow, &add_success);
+      addl(dst, Immediate(smi_value));
+      jmp(on_not_smi_result);
+      bind(&add_success);
+    }
+  } else {
+    UNIMPLEMENTED();  // Not used yet.
+  }
+}
+
+
+void MacroAssembler::SmiDiv(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result) {
+  ASSERT(!src2.is(rax));
+  ASSERT(!src2.is(rdx));
+  ASSERT(!src1.is(rdx));
+
+  // Check for 0 divisor (result is +/-Infinity).
+  Label positive_divisor;
+  testl(src2, src2);
+  j(zero, on_not_smi_result);
+  j(positive, &positive_divisor);
+  // Check for negative zero result.  If the dividend is zero, and the
+  // divisor is negative, return a floating point negative zero.
+  testl(src1, src1);
+  j(zero, on_not_smi_result);
+  bind(&positive_divisor);
+
+  // Sign extend src1 into edx:eax.
+  if (!src1.is(rax)) {
+    movl(rax, src1);
+  }
+  cdq();
+
+  idivl(src2);
+  // Check for the corner case of dividing the most negative smi by
+  // -1. We cannot use the overflow flag, since it is not set by
+  // idiv instruction.
+  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  cmpl(rax, Immediate(0x40000000));
+  j(equal, on_not_smi_result);
+  // Check that the remainder is zero.
+  testl(rdx, rdx);
+  j(not_zero, on_not_smi_result);
+  // Tag the result and store it in the destination register.
+  Integer32ToSmi(dst, rax);
+}
+
+
+void MacroAssembler::SmiMod(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!src2.is(rax));
+  ASSERT(!src2.is(rdx));
+  ASSERT(!src1.is(rdx));
+
+  testl(src2, src2);
+  j(zero, on_not_smi_result);
+
+  if (src1.is(rax)) {
+    // Must remember the value to see if a zero result should
+    // be a negative zero.
+    movl(kScratchRegister, rax);
+  } else {
+    movl(rax, src1);
+  }
+  // Sign extend eax into edx:eax.
+  cdq();
+  idivl(src2);
+  // Check for a negative zero result.  If the result is zero, and the
+  // dividend is negative, return a floating point negative zero.
+  Label non_zero_result;
+  testl(rdx, rdx);
+  j(not_zero, &non_zero_result);
+  if (src1.is(rax)) {
+    testl(kScratchRegister, kScratchRegister);
+  } else {
+    testl(src1, src1);
+  }
+  j(negative, on_not_smi_result);
+  bind(&non_zero_result);
+  if (!dst.is(rdx)) {
+    movl(dst, rdx);
+  }
+}
+
+
+void MacroAssembler::SmiNot(Register dst, Register src) {
+  if (dst.is(src)) {
+    not_(dst);
+    // Remove inverted smi-tag.  The mask is sign-extended to 64 bits.
+    xor_(src, Immediate(kSmiTagMask));
+  } else {
+    ASSERT_EQ(0, kSmiTag);
+    lea(dst, Operand(src, kSmiTagMask));
+    not_(dst);
+  }
+}
+
+
+void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
+  if (!dst.is(src1)) {
+    movl(dst, src1);
+  }
+  and_(dst, src2);
+}
+
+
+void MacroAssembler::SmiAndConstant(Register dst, Register src, int constant) {
+  ASSERT(Smi::IsValid(constant));
+  if (!dst.is(src)) {
+    movl(dst, src);
+  }
+  and_(dst, Immediate(Smi::FromInt(constant)));
+}
+
+
+void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
+  if (!dst.is(src1)) {
+    movl(dst, src1);
+  }
+  or_(dst, src2);
+}
+
+
+void MacroAssembler::SmiOrConstant(Register dst, Register src, int constant) {
+  ASSERT(Smi::IsValid(constant));
+  if (!dst.is(src)) {
+    movl(dst, src);
+  }
+  or_(dst, Immediate(Smi::FromInt(constant)));
+}
+
+void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
+  if (!dst.is(src1)) {
+    movl(dst, src1);
+  }
+  xor_(dst, src2);
+}
+
+
+void MacroAssembler::SmiXorConstant(Register dst, Register src, int constant) {
+  ASSERT(Smi::IsValid(constant));
+  if (!dst.is(src)) {
+    movl(dst, src);
+  }
+  xor_(dst, Immediate(Smi::FromInt(constant)));
+}
+
+
+
+void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
+                                                     Register src,
+                                                     int shift_value) {
+  if (shift_value > 0) {
+    if (dst.is(src)) {
+      sarl(dst, Immediate(shift_value));
+      and_(dst, Immediate(~kSmiTagMask));
+    } else {
+      UNIMPLEMENTED();  // Not used.
+    }
+  }
+}
+
+
+void MacroAssembler::SmiShiftLogicalRightConstant(Register dst,
+                                                  Register src,
+                                                  int shift_value,
+                                                  Label* on_not_smi_result) {
+  // Logical right shift interprets its result as an *unsigned* number.
+  if (dst.is(src)) {
+    UNIMPLEMENTED();  // Not used.
+  } else {
+    movl(dst, src);
+    // Untag the smi.
+    sarl(dst, Immediate(kSmiTagSize));
+    if (shift_value < 2) {
+      // A negative smi shifted logically right by two or more yields a result
+      // in the positive smi range; a shift by zero or one never does.
+      j(negative, on_not_smi_result);
+    }
+    if (shift_value > 0) {
+      // Do the right shift on the integer value.
+      shrl(dst, Immediate(shift_value));
+    }
+    // Re-tag the result.
+    addl(dst, dst);
+  }
+}
+
+
+void MacroAssembler::SmiShiftLeftConstant(Register dst,
+                                          Register src,
+                                          int shift_value,
+                                          Label* on_not_smi_result) {
+  if (dst.is(src)) {
+    UNIMPLEMENTED();  // Not used.
+  } else {
+    movl(dst, src);
+    if (shift_value > 0) {
+      // Treat dst as an untagged integer value equal to two times the
+      // smi value of src, i.e., already shifted left by one.
+      if (shift_value > 1) {
+        shll(dst, Immediate(shift_value - 1));
+      }
+      // Convert int result to Smi, checking that it is in smi range.
+      ASSERT(kSmiTagSize == 1);  // adjust code if not the case
+      Integer32ToSmi(dst, dst, on_not_smi_result);
+    }
+  }
+}
+
+
+void MacroAssembler::SmiShiftLeft(Register dst,
+                                  Register src1,
+                                  Register src2,
+                                  Label* on_not_smi_result) {
+  ASSERT(!dst.is(rcx));
+  Label result_ok;
+  // Untag both operands.
+  SmiToInteger32(dst, src1);
+  SmiToInteger32(rcx, src2);
+  shll(dst);
+  // Check that the *signed* result fits in a smi.
+  Condition is_valid = CheckInteger32ValidSmiValue(dst);
+  j(is_valid, &result_ok);
+  // Restore the relevant bits of the source registers
+  // and call the slow version.
+  if (dst.is(src1)) {
+    shrl(dst);
+    Integer32ToSmi(dst, dst);
+  }
+  Integer32ToSmi(rcx, rcx);
+  jmp(on_not_smi_result);
+  bind(&result_ok);
+  Integer32ToSmi(dst, dst);
+}
+
+
+void MacroAssembler::SmiShiftLogicalRight(Register dst,
+                                          Register src1,
+                                          Register src2,
+                                          Label* on_not_smi_result) {
+  ASSERT(!dst.is(rcx));
+  Label result_ok;
+  // Untag both operands.
+  SmiToInteger32(dst, src1);
+  SmiToInteger32(rcx, src2);
+
+  shrl(dst);
+  // Check that the *unsigned* result fits in a smi.
+  // I.e., that it is a valid positive smi value. The positive smi
+  // values are  0..0x3fffffff, i.e., neither of the top-most two
+  // bits can be set.
+  //
+  // These two cases can only happen with shifts by 0 or 1 when
+  // handed a valid smi.  If the answer cannot be represented by a
+  // smi, restore the left and right arguments, and jump to slow
+  // case.  The low bit of the left argument may be lost, but only
+  // in a case where it is dropped anyway.
+  testl(dst, Immediate(0xc0000000));
+  j(zero, &result_ok);
+  if (dst.is(src1)) {
+    shll(dst);
+    Integer32ToSmi(dst, dst);
+  }
+  Integer32ToSmi(rcx, rcx);
+  jmp(on_not_smi_result);
+  bind(&result_ok);
+  // Smi-tag the result in answer.
+  Integer32ToSmi(dst, dst);
+}
+
+
+void MacroAssembler::SmiShiftArithmeticRight(Register dst,
+                                             Register src1,
+                                             Register src2) {
+  ASSERT(!dst.is(rcx));
+  // Untag both operands.
+  SmiToInteger32(dst, src1);
+  SmiToInteger32(rcx, src2);
+  // Shift as integer.
+  sarl(dst);
+  // Retag result.
+  Integer32ToSmi(dst, dst);
+}
+
+
+void MacroAssembler::SelectNonSmi(Register dst,
+                                  Register src1,
+                                  Register src2,
+                                  Label* on_not_smis) {
+  ASSERT(!dst.is(src1));
+  ASSERT(!dst.is(src2));
+  // Both operands must not be smis.
+#ifdef DEBUG
+  Condition not_both_smis = CheckNotBothSmi(src1, src2);
+  Check(not_both_smis, "Both registers were smis.");
+#endif
+  ASSERT_EQ(0, kSmiTag);
+  ASSERT_EQ(0, Smi::FromInt(0));
+  movq(kScratchRegister, Immediate(kSmiTagMask));
+  and_(kScratchRegister, src1);
+  testl(kScratchRegister, src2);
+  j(not_zero, on_not_smis);
+  // One operand is a smi.
+
+  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
+  // kScratchRegister still holds src1 & kSmiTagMask, which is either zero
+  // or one.
+  subq(kScratchRegister, Immediate(1));
+  // If src1 is a smi, the scratch register is now all 1s, else it is all 0s.
+  movq(dst, src1);
+  xor_(dst, src2);
+  and_(dst, kScratchRegister);
+  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
+  xor_(dst, src1);
+  // If src1 is a smi, dst is src2, else it is src1, i.e., a non-smi.
+}
+
+
+SmiIndex MacroAssembler::SmiToIndex(Register dst, Register src, int shift) {
+  ASSERT(is_uint6(shift));
+  if (shift == 0) {  // times_1.
+    SmiToInteger32(dst, src);
+    return SmiIndex(dst, times_1);
+  }
+  if (shift <= 4) {  // Multipliers of 2 to 16 are handled using a ScaleFactor.
+    // We expect that all smis are actually zero-padded; if that invariant is
+    // guaranteed, the movl below could be omitted.
+    movl(dst, src);  // Ensure that the smi is zero-padded.
+    return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiTagSize));
+  }
+  // Shift by shift-kSmiTagSize.
+  movl(dst, src);  // Ensure that the smi is zero-padded.
+  shl(dst, Immediate(shift - kSmiTagSize));
+  return SmiIndex(dst, times_1);
+}
+
+
+SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
+                                            Register src,
+                                            int shift) {
+  // Register src holds a positive smi.
+  ASSERT(is_uint6(shift));
+  if (shift == 0) {  // times_1.
+    SmiToInteger32(dst, src);
+    neg(dst);
+    return SmiIndex(dst, times_1);
+  }
+  if (shift <= 4) {  // Multipliers of 2 to 16 are handled using a ScaleFactor.
+    movl(dst, src);
+    neg(dst);
+    return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiTagSize));
+  }
+  // Shift by shift-kSmiTagSize.
+  movl(dst, src);
+  neg(dst);
+  shl(dst, Immediate(shift - kSmiTagSize));
+  return SmiIndex(dst, times_1);
+}
+
+
+
 bool MacroAssembler::IsUnsafeSmi(Smi* value) {
   return false;
 }
@@ -520,7 +1243,7 @@
 #endif
   jmp(kScratchRegister);
 #ifdef DEBUG
-  ASSERT_EQ(kPatchReturnSequenceLength,
+  ASSERT_EQ(kCallTargetAddressOffset,
             SizeOfCodeGeneratedSince(&target) + kPointerSize);
 #endif
 }
@@ -549,7 +1272,7 @@
 #endif
   call(kScratchRegister);
 #ifdef DEBUG
-  ASSERT_EQ(kPatchReturnSequenceLength,
+  ASSERT_EQ(kCallTargetAddressOffset,
             SizeOfCodeGeneratedSince(&target) + kPointerSize);
 #endif
 }
@@ -599,7 +1322,7 @@
 
 
 void MacroAssembler::FCmp() {
-  fcompp();
+  fucompp();
   push(rax);
   fnstsw_ax();
   if (CpuFeatures::IsSupported(CpuFeatures::SAHF)) {
@@ -821,7 +1544,7 @@
         Bootstrapper::FixupFlagsIsPCRelative::encode(false) |
         Bootstrapper::FixupFlagsUseCodeObject::encode(false);
     Unresolved entry =
-        { pc_offset() - kPatchReturnSequenceLength, flags, name };
+        { pc_offset() - kCallTargetAddressOffset, flags, name };
     unresolved_.Add(entry);
   }
 }
@@ -1406,4 +2129,23 @@
 }
 
 
+CodePatcher::CodePatcher(byte* address, int size)
+    : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
+  // Create a new macro assembler pointing to the address of the code to patch.
+  // The size is adjusted with kGap on order for the assembler to generate size
+  // bytes of instructions without failing with buffer size constraints.
+  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+CodePatcher::~CodePatcher() {
+  // Indicate that code has changed.
+  CPU::FlushICache(address_, size_);
+
+  // Check that the code was patched as expected.
+  ASSERT(masm_.pc_ == address_ + size_);
+  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
 } }  // namespace v8::internal
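CheckInteger32ValidSmiValue above encodes the range check compactly: a 32-bit value fits in a smi exactly when it lies in [-2^30, 2^30 - 1], which is the same as bits 30 and 31 of its 32-bit representation being equal, and that is what cmpl(src, 0xC0000000) followed by the 'positive' condition tests. The same predicate restated in plain C++, as an illustrative sketch only:

    // Not V8 code: the smi range check from CheckInteger32ValidSmiValue.
    #include <cstdint>
    #include <cassert>

    bool FitsInSmi(int32_t value) {
      // Subtracting 0xC0000000 leaves the 32-bit sign bit clear exactly when
      // bits 30 and 31 of 'value' agree -- the same test as cmpl + 'positive'.
      return static_cast<int32_t>(static_cast<uint32_t>(value) - 0xC0000000u) >= 0;
    }

    int main() {
      assert(FitsInSmi(0));
      assert(FitsInSmi((1 << 30) - 1));   //  2^30 - 1: largest smi
      assert(FitsInSmi(-(1 << 30)));      // -2^30: smallest smi
      assert(!FitsInSmi(1 << 30));        //  2^30: one too large
      assert(!FitsInSmi(INT32_MIN));      // far below the smi range
      return 0;
    }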
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 9da2676..de2070a 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -41,6 +41,13 @@
 // Forward declaration.
 class JumpTarget;
 
+struct SmiIndex {
+  SmiIndex(Register index_register, ScaleFactor scale)
+      : reg(index_register),
+        scale(scale) {}
+  Register reg;
+  ScaleFactor scale;
+};
 
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
@@ -126,6 +133,230 @@
   // Store the code object for the given builtin in the target register.
   void GetBuiltinEntry(Register target, Builtins::JavaScript id);
 
+
+  // ---------------------------------------------------------------------------
+  // Smi tagging, untagging and operations on tagged smis.
+
+  // Conversions between tagged smi values and non-tagged integer values.
+
+  // Tag an integer value. The result must be known to be a valid smi value.
+  // Only uses the low 32 bits of the src register.
+  void Integer32ToSmi(Register dst, Register src);
+
+  // Tag an integer value if possible, or jump if the integer value cannot be
+  // represented as a smi. Only uses the low 32 bits of the src register.
+  void Integer32ToSmi(Register dst, Register src, Label* on_overflow);
+
+  // Adds constant to src and tags the result as a smi.
+  // Result must be a valid smi.
+  void Integer64AddToSmi(Register dst, Register src, int constant);
+
+  // Convert smi to 32-bit integer. I.e., not sign extended into
+  // high 32 bits of destination.
+  void SmiToInteger32(Register dst, Register src);
+
+  // Convert smi to 64-bit integer (sign extended if necessary).
+  void SmiToInteger64(Register dst, Register src);
+
+  // Multiply a positive smi's integer value by a power of two.
+  // Provides result as 64-bit integer value.
+  void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
+                                             Register src,
+                                             int power);
+
+  // Functions performing a check on a known or potential smi. Returns
+  // a condition that is satisfied if the check is successful.
+
+  // Is the value a tagged smi.
+  Condition CheckSmi(Register src);
+
+  // Is the value not a tagged smi.
+  Condition CheckNotSmi(Register src);
+
+  // Is the value a positive tagged smi.
+  Condition CheckPositiveSmi(Register src);
+
+  // Is the value not a positive tagged smi.
+  Condition CheckNotPositiveSmi(Register src);
+
+  // Are both values tagged smis.
+  Condition CheckBothSmi(Register first, Register second);
+
+  // Is one of the values not a tagged smi.
+  Condition CheckNotBothSmi(Register first, Register second);
+
+  // Is the value the minimum smi value (since we are using
+  // two's complement numbers, negating the value is known to yield
+  // a non-smi value).
+  Condition CheckIsMinSmi(Register src);
+
+  // Check whether a tagged smi is equal to a constant.
+  Condition CheckSmiEqualsConstant(Register src, int constant);
+
+  // Checks whether a 32-bit integer value is valid for conversion
+  // to a smi.
+  Condition CheckInteger32ValidSmiValue(Register src);
+
+  // Test-and-jump functions. Typically combines a check function
+  // above with a conditional jump.
+
+  // Jump if the value cannot be represented by a smi.
+  void JumpIfNotValidSmiValue(Register src, Label* on_invalid);
+
+  // Jump to label if the value is a tagged smi.
+  void JumpIfSmi(Register src, Label* on_smi);
+
+  // Jump to label if the value is not a tagged smi.
+  void JumpIfNotSmi(Register src, Label* on_not_smi);
+
+  // Jump to label if the value is not a positive tagged smi.
+  void JumpIfNotPositiveSmi(Register src, Label* on_not_positive_smi);
+
+  // Jump to label if the value is a tagged smi with value equal
+  // to the constant.
+  void JumpIfSmiEqualsConstant(Register src, int constant, Label* on_equals);
+
+  // Jump if either or both register are not smi values.
+  void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);
+
+  // Operations on tagged smi values.
+
+  // Smis represent a subset of integers. The subset is always equivalent to
+  // a two's complement interpretation of a fixed number of bits.
+
+  // Optimistically adds an integer constant to a supposed smi.
+  // If the src is not a smi, or the result is not a smi, jump to
+  // the label.
+  void SmiTryAddConstant(Register dst,
+                         Register src,
+                         int32_t constant,
+                         Label* on_not_smi_result);
+
+  // Add an integer constant to a tagged smi, giving a tagged smi as result,
+  // or jumping to a label if the result cannot be represented by a smi.
+  // If the label is NULL, no testing on the result is done.
+  void SmiAddConstant(Register dst,
+                      Register src,
+                      int32_t constant,
+                      Label* on_not_smi_result);
+
+  // Subtract an integer constant from a tagged smi, giving a tagged smi as
+  // result, or jumping to a label if the result cannot be represented by a smi.
+  // If the label is NULL, no testing on the result is done.
+  void SmiSubConstant(Register dst,
+                      Register src,
+                      int32_t constant,
+                      Label* on_not_smi_result);
+
+  // Negating a smi can give a negative zero or too large positive value.
+  void SmiNeg(Register dst,
+              Register src,
+              Label* on_not_smi_result);
+
+  // Adds smi values and returns the result as a smi.
+  // If dst is src1, then src1 will be destroyed, even if
+  // the operation is unsuccessful.
+  void SmiAdd(Register dst,
+              Register src1,
+              Register src2,
+              Label* on_not_smi_result);
+
+  // Subtracts smi values and returns the result as a smi.
+  // If dst is src1, then src1 will be destroyed, even if
+  // the operation is unsuccessful.
+  void SmiSub(Register dst,
+              Register src1,
+              Register src2,
+              Label* on_not_smi_result);
+
+  // Multiplies smi values and returns the result as a smi,
+  // if possible.
+  // If dst is src1, then src1 will be destroyed, even if
+  // the operation is unsuccessful.
+  void SmiMul(Register dst,
+              Register src1,
+              Register src2,
+              Label* on_not_smi_result);
+
+  // Divides one smi by another and returns the quotient.
+  // Clobbers rax and rdx registers.
+  void SmiDiv(Register dst,
+              Register src1,
+              Register src2,
+              Label* on_not_smi_result);
+
+  // Divides one smi by another and returns the remainder.
+  // Clobbers rax and rdx registers.
+  void SmiMod(Register dst,
+              Register src1,
+              Register src2,
+              Label* on_not_smi_result);
+
+  // Bitwise operations.
+  void SmiNot(Register dst, Register src);
+  void SmiAnd(Register dst, Register src1, Register src2);
+  void SmiOr(Register dst, Register src1, Register src2);
+  void SmiXor(Register dst, Register src1, Register src2);
+  void SmiAndConstant(Register dst, Register src1, int constant);
+  void SmiOrConstant(Register dst, Register src1, int constant);
+  void SmiXorConstant(Register dst, Register src1, int constant);
+
+  void SmiShiftLeftConstant(Register dst,
+                            Register src,
+                            int shift_value,
+                            Label* on_not_smi_result);
+  void SmiShiftLogicalRightConstant(Register dst,
+                                    Register src,
+                                    int shift_value,
+                                    Label* on_not_smi_result);
+  void SmiShiftArithmeticRightConstant(Register dst,
+                                       Register src,
+                                       int shift_value);
+
+  // Shifts a smi value to the left, and returns the result if that is a smi.
+  // Uses and clobbers rcx, so dst may not be rcx.
+  void SmiShiftLeft(Register dst,
+                    Register src1,
+                    Register src2,
+                    Label* on_not_smi_result);
+  // Shifts a smi value to the right, shifting in zero bits at the top, and
+  // returns the unsigned interpretation of the result if that is a smi.
+  // Uses and clobbers rcx, so dst may not be rcx.
+  void SmiShiftLogicalRight(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result);
+  // Shifts a smi value to the right, sign extending the top, and
+  // returns the signed interpretation of the result. That will always
+  // be a valid smi value, since it's numerically smaller than the
+  // original.
+  // Uses and clobbers rcx, so dst may not be rcx.
+  void SmiShiftArithmeticRight(Register dst,
+                               Register src1,
+                               Register src2);
+
+  // Specialized operations
+
+  // Select the non-smi register of two registers where exactly one is a
+  // smi. If neither is a smi, jump to the failure label.
+  void SelectNonSmi(Register dst,
+                    Register src1,
+                    Register src2,
+                    Label* on_not_smis);
+
+  // Converts, if necessary, a smi to a combination of number and
+  // multiplier to be used as a scaled index.
+  // The src register contains a *positive* smi value. The shift is the
+  // power of two to multiply the index value by (e.g.
+  // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
+  // The returned index register may be either src or dst, depending
+  // on what is most efficient. If src and dst are different registers,
+  // src is always unchanged.
+  SmiIndex SmiToIndex(Register dst, Register src, int shift);
+
+  // Converts a positive smi to a negative index.
+  SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
+
   // ---------------------------------------------------------------------------
   // Macro instructions
 
@@ -380,6 +611,26 @@
 };
 
 
+// The code patcher is used to patch (typically) small parts of code e.g. for
+// debugging and other types of instrumentation. When using the code patcher
+// the exact number of bytes specified must be emitted. It is not legal to
+// emit relocation information. If any of these constraints are violated it
+// causes an assertion to fail.
+class CodePatcher {
+ public:
+  CodePatcher(byte* address, int size);
+  virtual ~CodePatcher();
+
+  // Macro assembler to emit code.
+  MacroAssembler* masm() { return &masm_; }
+
+ private:
+  byte* address_;  // The address of the code being patched.
+  int size_;  // Number of bytes of the expected patch size.
+  MacroAssembler masm_;  // Macro assembler used to generate the code.
+};
+
+
 // -----------------------------------------------------------------------------
 // Static helper functions.
 
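These declarations replace the open-coded testl(reg, Immediate(kSmiTagMask)) / j(...) pairs used throughout the x64 stubs. A hedged sketch of how stub code is expected to use the new interface (the function name and register choices are invented for the example; only the MacroAssembler calls come from this patch):

    // Illustrative use of the new smi macros; not a function from this patch.
    // Adds two tagged values if both are smis, otherwise falls through to 'slow'.
    static void GenerateSmiAdd(MacroAssembler* masm, Label* slow) {
      // Bail out to the slow path unless both inputs are tagged smis.
      masm->JumpIfNotBothSmi(rax, rbx, slow);
      // Tagged addition; jumps to 'slow' (restoring rax) on overflow.
      masm->SmiAdd(rax, rax, rbx, slow);
      masm->ret(0);
    }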
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index fcb2092..741d4c3 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -47,17 +47,19 @@
                        StubCache::Table table,
                        Register name,
                        Register offset) {
+  // The offset register must hold a *positive* smi.
   ExternalReference key_offset(SCTableReference::keyReference(table));
   Label miss;
 
   __ movq(kScratchRegister, key_offset);
+  SmiIndex index = masm->SmiToIndex(offset, offset, kPointerSizeLog2);
   // Check that the key in the entry matches the name.
-  __ cmpl(name, Operand(kScratchRegister, offset, times_4, 0));
+  __ cmpl(name, Operand(kScratchRegister, index.reg, index.scale, 0));
   __ j(not_equal, &miss);
   // Get the code entry from the cache.
   // Use key_offset + kPointerSize, rather than loading value_offset.
   __ movq(kScratchRegister,
-          Operand(kScratchRegister, offset, times_4, kPointerSize));
+          Operand(kScratchRegister, index.reg, index.scale, kPointerSize));
   // Check that the flags match what we're looking for.
   __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
   __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
@@ -163,8 +165,7 @@
   ASSERT(!scratch.is(name));
 
   // Check that the receiver isn't a smi.
-  __ testl(receiver, Immediate(kSmiTagMask));
-  __ j(zero, &miss);
+  __ JumpIfSmi(receiver, &miss);
 
   // Get the map of the receiver and compute the hash.
   __ movl(scratch, FieldOperand(name, String::kLengthOffset));
@@ -204,8 +205,7 @@
                                       Register scratch,
                                       Label* miss_label) {
   // Check that the object isn't a smi.
-  __ testl(receiver_reg, Immediate(kSmiTagMask));
-  __ j(zero, miss_label);
+  __ JumpIfSmi(receiver_reg, miss_label);
 
   // Check that the map of the object hasn't changed.
   __ Cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
@@ -275,8 +275,7 @@
                                            Register scratch,
                                            Label* miss_label) {
   // Check that the receiver isn't a smi.
-  __ testl(receiver, Immediate(kSmiTagMask));
-  __ j(zero, miss_label);
+  __ JumpIfSmi(receiver, miss_label);
 
   // Check that the object is a JS array.
   __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
@@ -296,8 +295,7 @@
                                 Label* smi,
                                 Label* non_string_object) {
   // Check that the object isn't a smi.
-  __ testl(receiver, Immediate(kSmiTagMask));
-  __ j(zero, smi);
+  __ JumpIfSmi(receiver, smi);
 
   // Check that the object is a string.
   __ movq(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
@@ -325,7 +323,7 @@
   // rcx is also the receiver.
   __ lea(rcx, Operand(scratch, String::kLongLengthShift));
   __ shr(rax);  // rcx is implicit shift register.
-  __ shl(rax, Immediate(kSmiTagSize));
+  __ Integer32ToSmi(rax, rax);
   __ ret(0);
 
   // Check if the object is a JSValue wrapper.
@@ -535,8 +533,7 @@
   ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
 
   // Check that the receiver isn't a smi.
-  __ testl(receiver, Immediate(kSmiTagMask));
-  __ j(zero, miss);
+  __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
   Register reg =
@@ -701,8 +698,7 @@
 
   // Check that the receiver isn't a smi.
   if (check != NUMBER_CHECK) {
-    __ testl(rdx, Immediate(kSmiTagMask));
-    __ j(zero, &miss);
+    __ JumpIfSmi(rdx, &miss);
   }
 
   // Make sure that it's okay not to patch the on stack receiver
@@ -738,8 +734,7 @@
     case NUMBER_CHECK: {
       Label fast;
       // Check that the object is a smi or a heap number.
-      __ testl(rdx, Immediate(kSmiTagMask));
-      __ j(zero, &fast);
+      __ JumpIfSmi(rdx, &fast);
       __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
       __ j(not_equal, &miss);
       __ bind(&fast);
@@ -830,8 +825,7 @@
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
   // Check that the receiver isn't a smi.
-  __ testl(rdx, Immediate(kSmiTagMask));
-  __ j(zero, &miss);
+  __ JumpIfSmi(rdx, &miss);
 
   // Do the right check and compute the holder register.
   Register reg =
@@ -841,8 +835,7 @@
   GenerateFastPropertyLoad(masm(), rdi, reg, holder, index);
 
   // Check that the function really is a function.
-  __ testl(rdi, Immediate(kSmiTagMask));
-  __ j(zero, &miss);
+  __ JumpIfSmi(rdi, &miss);
   __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rbx);
   __ j(not_equal, &miss);
 
@@ -899,8 +892,7 @@
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
   // Check that the function really is a function.
-  __ testl(rax, Immediate(kSmiTagMask));
-  __ j(zero, &miss);
+  __ JumpIfSmi(rax, &miss);
   __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
   __ j(not_equal, &miss);
 
@@ -952,8 +944,7 @@
   // object which can only happen for contextual calls. In this case,
   // the receiver cannot be a smi.
   if (object != holder) {
-    __ testl(rdx, Immediate(kSmiTagMask));
-    __ j(zero, &miss);
+    __ JumpIfSmi(rdx, &miss);
   }
 
   // Check that the maps haven't changed.
@@ -1112,8 +1103,7 @@
   // object which can only happen for contextual loads. In this case,
   // the receiver cannot be a smi.
   if (object != holder) {
-    __ testl(rax, Immediate(kSmiTagMask));
-    __ j(zero, &miss);
+    __ JumpIfSmi(rax, &miss);
   }
 
   // Check that the maps haven't changed.
@@ -1335,8 +1325,7 @@
   __ movq(rbx, Operand(rsp, 1 * kPointerSize));
 
   // Check that the object isn't a smi.
-  __ testl(rbx, Immediate(kSmiTagMask));
-  __ j(zero, &miss);
+  __ JumpIfSmi(rbx, &miss);
 
   // Check that the map of the object hasn't changed.
   __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
@@ -1424,8 +1413,7 @@
   __ movq(rbx, Operand(rsp, 1 * kPointerSize));
 
   // Check that the object isn't a smi.
-  __ testl(rbx, Immediate(kSmiTagMask));
-  __ j(zero, &miss);
+  __ JumpIfSmi(rbx, &miss);
 
   // Check that the map of the object hasn't changed.
   __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
@@ -1631,8 +1619,7 @@
                                         String* name,
                                         Label* miss) {
   // Check that the receiver isn't a smi.
-  __ testl(receiver, Immediate(kSmiTagMask));
-  __ j(zero, miss);
+  __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
   Register reg =
@@ -1701,8 +1688,7 @@
                                      String* name,
                                      Label* miss) {
   // Check that the receiver isn't a smi.
-  __ testl(receiver, Immediate(kSmiTagMask));
-  __ j(zero, miss);
+  __ JumpIfSmi(receiver, miss);
 
   // Check the prototype chain.
   Register reg =
@@ -1724,8 +1710,7 @@
                                         String* name,
                                         Label* miss) {
   // Check that the receiver isn't a smi.
-  __ testl(receiver, Immediate(kSmiTagMask));
-  __ j(zero, miss);
+  __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
   Register reg =
@@ -1766,8 +1751,7 @@
   // Load the initial map and verify that it is in fact a map.
   __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
   // Will both indicate a NULL and a Smi.
-  __ testq(rbx, Immediate(kSmiTagMask));
-  __ j(zero, &generic_stub_call);
+  __ JumpIfSmi(rbx, &generic_stub_call);
   __ CmpObjectType(rbx, MAP_TYPE, rcx);
   __ j(not_equal, &generic_stub_call);
 
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
index c2866a7..655f4c6 100644
--- a/src/x64/virtual-frame-x64.cc
+++ b/src/x64/virtual-frame-x64.cc
@@ -65,8 +65,8 @@
 #ifdef DEBUG
   // Verify that rdi contains a JS function.  The following code
   // relies on rax being available for use.
-  __ testl(rdi, Immediate(kSmiTagMask));
-  __ Check(not_zero,
+  Condition not_smi = masm()->CheckNotSmi(rdi);
+  __ Check(not_smi,
            "VirtualFrame::Enter - rdi is not a function (smi check).");
   __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
   __ Check(equal,
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index 68aabb5..8fff769 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -36,8 +36,6 @@
 
 [ $arch == arm ]
 
-test-debug: SKIP
-
 # BUG(113): Test seems flaky on ARM.
 test-spaces/LargeObjectSpace: PASS || FAIL
 
diff --git a/test/cctest/test-conversions.cc b/test/cctest/test-conversions.cc
index 6c0b9a6..35ab46f 100644
--- a/test/cctest/test-conversions.cc
+++ b/test/cctest/test-conversions.cc
@@ -91,13 +91,15 @@
   CHECK_EQ(0.0, StringToDouble(" ", NO_FLAGS));
 }
 
+class OneBit1: public BitField<uint32_t, 0, 1> {};
+class OneBit2: public BitField<uint32_t, 7, 1> {};
+class EightBit1: public BitField<uint32_t, 0, 8> {};
+class EightBit2: public BitField<uint32_t, 13, 8> {};
 
 TEST(BitField) {
   uint32_t x;
 
   // One bit bit field can hold values 0 and 1.
-  class OneBit1: public BitField<uint32_t, 0, 1> {};
-  class OneBit2: public BitField<uint32_t, 7, 1> {};
   CHECK(!OneBit1::is_valid(static_cast<uint32_t>(-1)));
   CHECK(!OneBit2::is_valid(static_cast<uint32_t>(-1)));
   for (int i = 0; i < 2; i++) {
@@ -113,8 +115,6 @@
   CHECK(!OneBit2::is_valid(2));
 
   // Eight bit bit field can hold values from 0 to 255.
-  class EightBit1: public BitField<uint32_t, 0, 8> {};
-  class EightBit2: public BitField<uint32_t, 13, 8> {};
   CHECK(!EightBit1::is_valid(static_cast<uint32_t>(-1)));
   CHECK(!EightBit2::is_valid(static_cast<uint32_t>(-1)));
   for (int i = 0; i < 256; i++) {
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index 0cae26c..436084a 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -2301,13 +2301,8 @@
   break_point_hit_count = 0;
   foo->Call(env->Global(), 0, NULL);
 
-  // With stepping all break locations are hit. For ARM the keyed load/store
-  // is not hit as they are not implemented as ICs.
-#if defined (__arm__) || defined(__thumb__)
-  CHECK_EQ(6, break_point_hit_count);
-#else
+  // With stepping all break locations are hit.
   CHECK_EQ(8, break_point_hit_count);
-#endif
 
   v8::Debug::SetDebugEventListener(NULL);
   CheckDebuggerUnloaded();
diff --git a/test/cctest/test-strings.cc b/test/cctest/test-strings.cc
index 127b7a2..bb9a6f9 100644
--- a/test/cctest/test-strings.cc
+++ b/test/cctest/test-strings.cc
@@ -48,6 +48,21 @@
 static const int SUPER_DEEP_DEPTH = 80 * 1024;
 
 
+class Resource: public v8::String::ExternalStringResource,
+                public ZoneObject {
+ public:
+  explicit Resource(Vector<const uc16> string): data_(string.start()) {
+    length_ = string.length();
+  }
+  virtual const uint16_t* data() const { return data_; }
+  virtual size_t length() const { return length_; }
+
+ private:
+  const uc16* data_;
+  size_t length_;
+};
+
+
 static void InitializeBuildingBlocks(
     Handle<String> building_blocks[NUMBER_OF_BUILDING_BLOCKS]) {
   // A list of pointers that we don't have any interest in cleaning up.
@@ -83,19 +98,6 @@
         break;
       }
       case 2: {
-        class Resource: public v8::String::ExternalStringResource,
-                        public ZoneObject {
-         public:
-          explicit Resource(Vector<const uc16> string): data_(string.start()) {
-            length_ = string.length();
-          }
-          virtual const uint16_t* data() const { return data_; }
-          virtual size_t length() const { return length_; }
-
-         private:
-          const uc16* data_;
-          size_t length_;
-        };
         uc16* buf = Zone::NewArray<uc16>(len);
         for (int j = 0; j < len; j++) {
           buf[j] = gen() % 65536;
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index 3b89154..13e69ae 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -45,31 +45,6 @@
 # Flaky test that can hit compilation-time stack overflow in debug mode.
 unicode-test: PASS, (PASS || FAIL) if $mode == debug
 
-# Bug number 1020483: Debug tests fail on ARM.
-debug-constructor: CRASH, FAIL
-debug-continue: SKIP
-debug-evaluate-recursive: CRASH || FAIL
-debug-changebreakpoint: CRASH || FAIL
-debug-clearbreakpoint: CRASH || FAIL
-debug-clearbreakpointgroup: PASS, FAIL if $mode == debug
-debug-conditional-breakpoints: CRASH || FAIL
-debug-evaluate: CRASH || FAIL
-debug-ignore-breakpoints: CRASH || FAIL
-debug-multiple-breakpoints: CRASH || FAIL
-debug-setbreakpoint: CRASH || FAIL || PASS
-debug-step-stub-callfunction: SKIP
-debug-stepin-accessor: CRASH || FAIL
-debug-stepin-builtin: CRASH || FAIL
-debug-stepin-call-function-stub: CRASH || FAIL
-debug-stepin-constructor: CRASH, FAIL
-debug-stepin-function-call: CRASH || FAIL
-debug-stepout-recursive-function: CRASH || FAIL
-debug-stepout-to-builtin: CRASH || FAIL
-debug-step: SKIP
-debug-breakpoints: PASS || FAIL
-debug-handle: CRASH || FAIL || PASS
-regress/regress-269: SKIP
-
 # Bug number 130 http://code.google.com/p/v8/issues/detail?id=130
 # Fails on real ARM hardware but not on the simulator.
 string-compare-alignment: PASS || FAIL
diff --git a/test/mjsunit/smi-negative-zero.js b/test/mjsunit/smi-negative-zero.js
index afeb6de..719ee49 100644
--- a/test/mjsunit/smi-negative-zero.js
+++ b/test/mjsunit/smi-negative-zero.js
@@ -47,40 +47,40 @@
 assertEquals(one / (zero / minus_one), -Infinity, "one / -0 III");
 assertEquals(one / (zero / one), Infinity, "one / 0 II");
 
-assertEquals(one / (minus_four % two), -Infinity, "foo");
-assertEquals(one / (minus_four % minus_two), -Infinity, "foo");
-assertEquals(one / (four % two), Infinity, "foo");
-assertEquals(one / (four % minus_two), Infinity, "foo");
+assertEquals(one / (minus_four % two), -Infinity, "foo1");
+assertEquals(one / (minus_four % minus_two), -Infinity, "foo2");
+assertEquals(one / (four % two), Infinity, "foo3");
+assertEquals(one / (four % minus_two), Infinity, "foo4");
 
 // literal op variable
 
-assertEquals(one / (0 * minus_one), -Infinity, "bar");
-assertEquals(one / (-1 * zero), -Infinity, "bar");
-assertEquals(one / (0 * zero), Infinity, "bar");
-assertEquals(one / (-1 * minus_one), 1, "bar");
+assertEquals(one / (0 * minus_one), -Infinity, "bar1");
+assertEquals(one / (-1 * zero), -Infinity, "bar2");
+assertEquals(one / (0 * zero), Infinity, "bar3");
+assertEquals(one / (-1 * minus_one), 1, "bar4");
 
-assertEquals(one / (0 / minus_one), -Infinity, "baz");
-assertEquals(one / (0 / one), Infinity, "baz");
+assertEquals(one / (0 / minus_one), -Infinity, "baz1");
+assertEquals(one / (0 / one), Infinity, "baz2");
 
-assertEquals(one / (-4 % two), -Infinity, "baz");
-assertEquals(one / (-4 % minus_two), -Infinity, "baz");
-assertEquals(one / (4 % two), Infinity, "baz");
-assertEquals(one / (4 % minus_two), Infinity, "baz");
+assertEquals(one / (-4 % two), -Infinity, "baz3");
+assertEquals(one / (-4 % minus_two), -Infinity, "baz4");
+assertEquals(one / (4 % two), Infinity, "baz5");
+assertEquals(one / (4 % minus_two), Infinity, "baz6");
 
 // variable op literal
 
-assertEquals(one / (zero * -1), -Infinity, "fizz");
-assertEquals(one / (minus_one * 0), -Infinity, "fizz");
-assertEquals(one / (zero * 0), Infinity, "fizz");
-assertEquals(one / (minus_one * -1), 1, "fizz");
+assertEquals(one / (zero * -1), -Infinity, "fizz1");
+assertEquals(one / (minus_one * 0), -Infinity, "fizz2");
+assertEquals(one / (zero * 0), Infinity, "fizz3");
+assertEquals(one / (minus_one * -1), 1, "fizz4");
 
-assertEquals(one / (zero / -1), -Infinity, "buzz");
-assertEquals(one / (zero / 1), Infinity, "buzz");
+assertEquals(one / (zero / -1), -Infinity, "buzz1");
+assertEquals(one / (zero / 1), Infinity, "buzz2");
 
-assertEquals(one / (minus_four % 2), -Infinity, "buzz");
-assertEquals(one / (minus_four % -2), -Infinity, "buzz");
-assertEquals(one / (four % 2), Infinity, "buzz");
-assertEquals(one / (four % -2), Infinity, "buzz");
+assertEquals(one / (minus_four % 2), -Infinity, "buzz3");
+assertEquals(one / (minus_four % -2), -Infinity, "buzz4");
+assertEquals(one / (four % 2), Infinity, "buzz5");
+assertEquals(one / (four % -2), Infinity, "buzz6");
 
 // literal op literal
 
@@ -91,10 +91,10 @@
 assertEquals(one / (0 * 0), Infinity, "fisk4");
 assertEquals(one / (-1 * -1), 1, "fisk5");
 
-assertEquals(one / (0 / -1), -Infinity, "hest");
-assertEquals(one / (0 / 1), Infinity, "hest");
+assertEquals(one / (0 / -1), -Infinity, "hest1");
+assertEquals(one / (0 / 1), Infinity, "hest2");
 
-assertEquals(one / (-4 % 2), -Infinity, "fiskhest");
-assertEquals(one / (-4 % -2), -Infinity, "fiskhest");
-assertEquals(one / (4 % 2), Infinity, "fiskhest");
-assertEquals(one / (4 % -2), Infinity, "fiskhest");
+assertEquals(one / (-4 % 2), -Infinity, "fiskhest1");
+assertEquals(one / (-4 % -2), -Infinity, "fiskhest2");
+assertEquals(one / (4 % 2), Infinity, "fiskhest3");
+assertEquals(one / (4 % -2), Infinity, "fiskhest4");
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 1222ea9..57bcc71 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -40,6 +40,7 @@
   'target_defaults': {
     'defines': [
       'ENABLE_LOGGING_AND_PROFILING',
+      'ENABLE_DEBUGGER_SUPPORT',
     ],
     'conditions': [
       ['target_arch=="arm"', {
diff --git a/tools/presubmit.py b/tools/presubmit.py
index 3e714de..c4f7853 100755
--- a/tools/presubmit.py
+++ b/tools/presubmit.py
@@ -30,7 +30,7 @@
 
 import optparse
 import os
-from os.path import abspath, join, dirname, basename
+from os.path import abspath, join, dirname, basename, exists
 import re
 import sys
 import subprocess
@@ -103,7 +103,7 @@
     all_files = []
     for file in self.GetPathsToSearch():
       all_files += self.FindFilesIn(join(path, file))
-    if not self.ProcessFiles(all_files):
+    if not self.ProcessFiles(all_files, path):
       return False
     return True
 
@@ -145,9 +145,12 @@
   def GetPathsToSearch(self):
     return ['src', 'public', 'samples', join('test', 'cctest')]
 
-  def ProcessFiles(self, files):
+  def ProcessFiles(self, files, path):
     filt = '-,' + ",".join(['+' + n for n in ENABLED_LINT_RULES])
     command = ['cpplint.py', '--filter', filt] + join(files)
+    local_cpplint = join(path, "tools", "cpplint.py")
+    if exists(local_cpplint):
+      command = ['python', local_cpplint, '--filter', filt] + join(files)
     process = subprocess.Popen(command)
     return process.wait() == 0
 
@@ -194,7 +197,7 @@
         result = False
     return result
 
-  def ProcessFiles(self, files):
+  def ProcessFiles(self, files, path):
     success = True
     for file in files:
       try:
diff --git a/tools/test.py b/tools/test.py
index c1b8b80..3a60c59 100755
--- a/tools/test.py
+++ b/tools/test.py
@@ -1084,6 +1084,8 @@
       choices=PROGRESS_INDICATORS.keys(), default="mono")
   result.add_option("--no-build", help="Don't build requirements",
       default=False, action="store_true")
+  result.add_option("--build-only", help="Only build requirements, don't run the tests",
+      default=False, action="store_true")
   result.add_option("--report", help="Print a summary of the tests to be run",
       default=False, action="store_true")
   result.add_option("-s", "--suite", help="A test suite",
@@ -1261,6 +1263,10 @@
       if not BuildRequirements(context, reqs, options.mode, options.scons_flags):
         return 1
 
+  # Just return if we are only building the targets for running the tests.
+  if options.build_only:
+    return 0
+  
   # Get status for tests
   sections = [ ]
   defs = { }
diff --git a/tools/v8.xcodeproj/project.pbxproj b/tools/v8.xcodeproj/project.pbxproj
index f9241f9..79ece72 100644
--- a/tools/v8.xcodeproj/project.pbxproj
+++ b/tools/v8.xcodeproj/project.pbxproj
@@ -1489,6 +1489,7 @@
 					V8_TARGET_ARCH_IA32,
 					V8_NATIVE_REGEXP,
 					ENABLE_LOGGING_AND_PROFILING,
+					ENABLE_DEBUGGER_SUPPORT,
 				);
 				HEADER_SEARCH_PATHS = ../src;
 				PRODUCT_NAME = v8;
@@ -1537,6 +1538,7 @@
 					V8_TARGET_ARCH_ARM,
 					ENABLE_DISASSEMBLER,
 					ENABLE_LOGGING_AND_PROFILING,
+					ENABLE_DEBUGGER_SUPPORT,
 				);
 				HEADER_SEARCH_PATHS = ../src;
 				PRODUCT_NAME = "v8-arm";
diff --git a/tools/visual_studio/common.vsprops b/tools/visual_studio/common.vsprops
index 238dd97..213a081 100644
--- a/tools/visual_studio/common.vsprops
+++ b/tools/visual_studio/common.vsprops
@@ -8,7 +8,7 @@
 	<Tool
 		Name="VCCLCompilerTool"
 		AdditionalIncludeDirectories="$(ProjectDir)\..\..\src;$(IntDir)\DerivedSources"
-		PreprocessorDefinitions="WIN32;_CRT_SECURE_NO_DEPRECATE;_CRT_NONSTDC_NO_DEPRECATE;_HAS_EXCEPTIONS=0;ENABLE_LOGGING_AND_PROFILING"
+		PreprocessorDefinitions="WIN32;_CRT_SECURE_NO_DEPRECATE;_CRT_NONSTDC_NO_DEPRECATE;_HAS_EXCEPTIONS=0;ENABLE_LOGGING_AND_PROFILING;ENABLE_DEBUGGER_SUPPORT"
 		MinimalRebuild="false"
 		ExceptionHandling="0"
 		RuntimeTypeInfo="false"